/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN   128

#define VIRTNET_SEND_COMMAND_SG_MAX    2
#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;

        u64 rx_bytes;
        u64 rx_packets;
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
        struct napi_struct napi;
        unsigned int status;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* enable config space updates */
        bool config_enable;

        /* Active statistics */
        struct virtnet_stats __percpu *stats;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Lock for config space updates */
        struct mutex config_lock;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* fragments + linear part + virtio header */
        struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
        struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

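/*
 * Per-skb virtio header, stashed in the skb control buffer (see
 * skb_vnet_hdr()).  num_sg records how many scatterlist entries
 * (header plus data) the skb uses on the send queue.
 */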
struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
        unsigned int num_sg;
};

struct padded_vnet_hdr {
        struct virtio_net_hdr hdr;
        /*
         * virtio_net_hdr must go in a separate sg entry because of a
         * QEMU bug, and the data sg entry shares a page with this header.
         * This padding makes the next sg 16 byte aligned after virtio_net_hdr.
         */
        char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)vi->pages;
        vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
        struct page *p = vi->pages;

        if (p) {
                vi->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
        struct virtnet_info *vi = svq->vdev->priv;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(svq);

        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
}

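/*
 * Attach up to one page (or the remaining *len, if smaller) to the skb
 * as a new fragment, and reduce *len by the amount consumed.
 */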
static void set_skb_frag(struct sk_buff *skb, struct page *page,
                         unsigned int offset, unsigned int *len)
{
        int size = min((unsigned)PAGE_SIZE - offset, *len);
        int i = skb_shinfo(skb)->nr_frags;

        __skb_fill_page_desc(skb, i, page, offset, size);

        skb->data_len += size;
        skb->len += size;
        skb->truesize += PAGE_SIZE;
        skb_shinfo(skb)->nr_frags++;
        *len -= size;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct page *page, unsigned int len)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        unsigned int copy, hdr_len, offset;
        char *p;

        p = page_address(page);

        /* copy small packet so we can reuse these pages for small data */
        skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        if (vi->mergeable_rx_bufs) {
                hdr_len = sizeof hdr->mhdr;
                offset = hdr_len;
        } else {
                hdr_len = sizeof hdr->hdr;
                offset = sizeof(struct padded_vnet_hdr);
        }

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        p += offset;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        memcpy(skb_put(skb, copy), p, copy);

        len -= copy;
        offset += copy;

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                if (net_ratelimit())
                        pr_debug("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }

        while (len) {
                set_skb_frag(skb, page, offset, &len);
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(vi, page);

        return skb;
}

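/*
 * A mergeable-buffer packet arrives as hdr->mhdr.num_buffers separate
 * pages; the first one built the skb, so pull the remaining pages off
 * the receive queue and attach each as a fragment.
 */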
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        struct page *page;
        int num_buf, i, len;

        num_buf = hdr->mhdr.num_buffers;
        while (--num_buf) {
                i = skb_shinfo(skb)->nr_frags;
                if (i >= MAX_SKB_FRAGS) {
                        pr_debug("%s: packet too long\n", skb->dev->name);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }
                page = virtqueue_get_buf(vi->rvq, &len);
                if (!page) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 skb->dev->name, hdr->mhdr.num_buffers);
                        skb->dev->stats.rx_length_errors++;
                        return -EINVAL;
                }

                if (len > PAGE_SIZE)
                        len = PAGE_SIZE;

                set_skb_frag(skb, page, 0, &len);

                --vi->num;
        }
        return 0;
}

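/*
 * Handle one completed receive buffer: build an skb around it (copying
 * or chaining pages as needed), apply checksum and GSO metadata from
 * the virtio header, and hand the result to the network stack.
 */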
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
        struct page *page;
        struct skb_vnet_hdr *hdr;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                return;
        }

        if (!vi->mergeable_rx_bufs && !vi->big_packets) {
                skb = buf;
                len -= sizeof(struct virtio_net_hdr);
                skb_trim(skb, len);
        } else {
                page = buf;
                skb = page_to_skb(vi, page, len);
                if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
                        give_pages(vi, page);
                        return;
                }
                if (vi->mergeable_rx_bufs)
                        if (receive_mergeable(vi, skb)) {
                                dev_kfree_skb(skb);
                                return;
                        }
        }

        hdr = skb_vnet_hdr(skb);

        u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
        u64_stats_update_end(&stats->rx_syncp);

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: bad gso type %u.\n",
                                       dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "%s: zero gso size.\n",
                                       dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}

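/*
 * Post one MAX_PACKET_LEN skb as a receive buffer: the virtio header
 * goes in vi->rx_sg[0] and the skb data follows it.
 */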
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        int err;

        skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_put(skb, MAX_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

        skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);

        return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(vi, gfp);
                if (!first) {
                        if (list)
                                give_pages(vi, list);
                        return -ENOMEM;
                }
                sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(vi, gfp);
        if (!first) {
                give_pages(vi, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
        /* a separate vi->rx_sg[0] for the virtio_net_hdr only, due to a QEMU bug */
        sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

        /* vi->rx_sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
                                first, gfp);
        if (err < 0)
                give_pages(vi, first);

        return err;
}

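/*
 * Post a single page as a receive buffer.  With mergeable buffers the
 * host spreads a large packet over several such pages and reports the
 * count in the num_buffers field of the header.
 */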
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
        struct page *page;
        int err;

        page = get_a_page(vi, gfp);
        if (!page)
                return -ENOMEM;

        sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
        if (err < 0)
                give_pages(vi, page);

        return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(vi, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, gfp);
                else
                        err = add_recvbuf_small(vi, gfp);

                oom = err == -ENOMEM;
                if (err < 0)
                        break;
                ++vi->num;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
        virtqueue_kick(vi->rvq);
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
                virtqueue_disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
        napi_enable(&vi->napi);

        /* If all buffers were filled by the other side before we enabled
         * NAPI, we won't get another interrupt, so process any outstanding
         * packets now.  virtnet_poll wants to re-enable callbacks, so we
         * disable them here.  We synchronize against interrupts via
         * NAPI_STATE_SCHED. */
        if (napi_schedule_prep(&vi->napi)) {
                virtqueue_disable_cb(vi->rvq);
                local_bh_disable();
                __napi_schedule(&vi->napi);
                local_bh_enable();
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi;
        bool still_empty;

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
        still_empty = !try_fill_recv(vi, GFP_KERNEL);
        virtnet_napi_enable(vi);

        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
                schedule_delayed_work(&vi->refill, HZ/2);
}

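/*
 * NAPI poll: drain up to budget received buffers, refill the receive
 * queue once it falls below half of the largest size we have seen,
 * and re-enable receive callbacks when we run out of packets.
 */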
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
        void *buf;
        unsigned int len, received = 0;

again:
        while (received < budget &&
               (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
                receive_buf(vi->dev, buf, len);
                --vi->num;
                received++;
        }

        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
                if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

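/* Reclaim skbs the host has finished sending, updating tx statistics. */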
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
        struct sk_buff *skb;
        unsigned int len;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

        while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);

                u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
                u64_stats_update_end(&stats->tx_syncp);

                dev_kfree_skb_any(skb);
        }
}

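/*
 * Translate the skb's checksum and GSO state into a virtio_net_hdr,
 * then post the header plus packet data on the send queue.
 */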
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb_checksum_start_offset(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        hdr->mhdr.num_buffers = 0;

        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
                sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
        else
                sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

        hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
        return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
                                 0, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;

        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);

        /* Try to transmit */
        capacity = xmit_skb(vi, skb);

        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
                if (likely(capacity == -ENOMEM)) {
                        if (net_ratelimit())
                                dev_warn(&dev->dev,
                                         "TX queue failure: out of memory\n");
                } else {
                        dev->stats.tx_fifo_errors++;
                        if (net_ratelimit())
                                dev_warn(&dev->dev,
                                         "Unexpected TX queue failure: %d\n",
                                         capacity);
                }
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        virtqueue_kick(vi->svq);

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
                if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(vi);
                        capacity = vi->svq->num_free;
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
                                virtqueue_disable_cb(vi->svq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;

        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
                vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len);

        return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                                               struct rtnl_link_stats64 *tot)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int cpu;
        unsigned int start;

        for_each_possible_cpu(cpu) {
                struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
                u64 tpackets, tbytes, rpackets, rbytes;

                do {
                        start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

                do {
                        start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
                tot->rx_bytes   += rbytes;
                tot->tx_bytes   += tbytes;
        }

        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;

        return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        /* Make sure we have some buffers: if oom use wq. */
        if (!try_fill_recv(vi, GFP_KERNEL))
                schedule_delayed_work(&vi->refill, 0);

        virtnet_napi_enable(vi);
        return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *data, int out, int in)
{
        struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned int tmp;
        int i;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
                (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

        out++; /* Add header */
        in++; /* Add return status */

        ctrl.class = class;
        ctrl.cmd = cmd;

        sg_init_table(sg, out + in);

        sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
        for_each_sg(data, s, out + in - 2, i)
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

        BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

        virtqueue_kick(vi->cvq);

        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
        rtnl_lock();
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
                                  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
                                  0, 0))
                dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
        rtnl_unlock();
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);

        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
        napi_disable(&vi->napi);

        return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
        int uc_count;
        int mc_count;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, 1, 0))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        mac_data = buf;
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
        }

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = uc_count;
        i = 0;
        netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[uc_count][0];

        mac_data->entries = mc_count;
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, 2, 0))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
        return 0;
}

static void virtnet_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        struct virtnet_info *vi = netdev_priv(dev);

        ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
        ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
        ring->rx_pending = ring->rx_max_pending;
        ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, config_work);
        u16 v;

        mutex_lock(&vi->config_lock);
        if (!vi->config_enable)
                goto done;

        if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
                              offsetof(struct virtio_net_config, status),
                              &v) < 0)
                goto done;

        if (v & VIRTIO_NET_S_ANNOUNCE) {
                netdev_notify_peers(vi->dev);
                virtnet_ack_link_announce(vi);
        }

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                goto done;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_wake_queue(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_stop_queue(vi->dev);
        }
done:
        mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        schedule_work(&vi->config_work);
}

static int init_vqs(struct virtnet_info *vi)
{
        struct virtqueue *vqs[3];
        vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
        const char *names[] = { "input", "output", "control" };
        int nvqs, err;

        /* We expect two virtqueues, receive then send,
         * and optionally control. */
        nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

        err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
        if (err)
                return err;

        vi->rvq = vqs[0];
        vi->svq = vqs[1];

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
                vi->cvq = vqs[2];

                if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
                        vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
        }
        return 0;
}

static int virtnet_probe(struct virtio_device *vdev)
{
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;

        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
        if (!dev)
                return -ENOMEM;

        /* Set up network device as normal. */
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;

        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);

        /* Do we support "hardware" checksums? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
                dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
                if (csum)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
                        dev->hw_features |= NETIF_F_TSO;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
                        dev->hw_features |= NETIF_F_UFO;

                if (gso)
                        dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
                /* (!csum && gso) case will be fixed by register_netdev() */
        }

        /* Configuration may specify what MAC to use.  Otherwise random. */
        if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
                                  offsetof(struct virtio_net_config, mac),
                                  dev->dev_addr, dev->addr_len) < 0)
                eth_hw_addr_random(dev);

        /* Set up our device-specific information */
        vi = netdev_priv(dev);
        netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
        vi->dev = dev;
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
        vi->stats = alloc_percpu(struct virtnet_stats);
        err = -ENOMEM;
        if (vi->stats == NULL)
                goto free;

        INIT_DELAYED_WORK(&vi->refill, refill_work);
        mutex_init(&vi->config_lock);
        vi->config_enable = true;
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
        sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
        sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;

        err = init_vqs(vi);
        if (err)
                goto free_stats;

        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
                goto free_vqs;
        }

        /* Last of all, set up some receive buffers. */
        try_fill_recv(vi, GFP_KERNEL);

        /* If we didn't even get one input buffer, we're useless. */
        if (vi->num == 0) {
                err = -ENOMEM;
                goto unregister;
        }

        /* Assume link up if device can't report link status,
           otherwise get link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                netif_carrier_off(dev);
                schedule_work(&vi->config_work);
        } else {
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
        }

        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;

unregister:
        unregister_netdev(dev);
free_vqs:
        vdev->config->del_vqs(vdev);
free_stats:
        free_percpu(vi->stats);
free:
        free_netdev(dev);
        return err;
}

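/*
 * Called after the device has been reset: detach and free any buffers
 * left unused in the send and receive queues.
 */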
static void free_unused_bufs(struct virtnet_info *vi)
{
        void *buf;
        while (1) {
                buf = virtqueue_detach_unused_buf(vi->svq);
                if (!buf)
                        break;
                dev_kfree_skb(buf);
        }
        while (1) {
                buf = virtqueue_detach_unused_buf(vi->rvq);
                if (!buf)
                        break;
                if (vi->mergeable_rx_bufs || vi->big_packets)
                        give_pages(vi, buf);
                else
                        dev_kfree_skb(buf);
                --vi->num;
        }
        BUG_ON(vi->num != 0);
}

static void remove_vq_common(struct virtnet_info *vi)
{
        vi->vdev->config->reset(vi->vdev);

        /* Free unused buffers in both send and recv, if any. */
        free_unused_bufs(vi);

        vi->vdev->config->del_vqs(vi->vdev);

        while (vi->pages)
                __free_pages(get_a_page(vi, GFP_KERNEL), 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vi->config_lock);
        vi->config_enable = false;
        mutex_unlock(&vi->config_lock);

        unregister_netdev(vi->dev);

        remove_vq_common(vi);

        flush_work(&vi->config_work);

        free_percpu(vi->stats);
        free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        /* Prevent config work handler from accessing the device */
        mutex_lock(&vi->config_lock);
        vi->config_enable = false;
        mutex_unlock(&vi->config_lock);

        netif_device_detach(vi->dev);
        cancel_delayed_work_sync(&vi->refill);

        if (netif_running(vi->dev))
                napi_disable(&vi->napi);

        remove_vq_common(vi);

        flush_work(&vi->config_work);

        return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        int err;

        err = init_vqs(vi);
        if (err)
                return err;

        if (netif_running(vi->dev))
                virtnet_napi_enable(vi);

        netif_device_attach(vi->dev);

        if (!try_fill_recv(vi, GFP_KERNEL))
                schedule_delayed_work(&vi->refill, 0);

        mutex_lock(&vi->config_lock);
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);

        return 0;
}
#endif

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
        VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE,
};

static struct virtio_driver virtio_net_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
        .freeze =       virtnet_freeze,
        .restore =      virtnet_restore,
#endif
};

static int __init init(void)
{
        return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");