xen-netback: avoid allocating variable size array on stack
drivers/net/xen-netback/netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40
41 #include <net/tcp.h>
42
43 #include <xen/xen.h>
44 #include <xen/events.h>
45 #include <xen/interface/memory.h>
46
47 #include <asm/xen/hypercall.h>
48 #include <asm/xen/page.h>
49
50 /*
51  * This is the maximum number of slots an skb can use. If a guest
52  * sends an skb which exceeds this limit it is considered malicious.
53  */
54 #define MAX_SKB_SLOTS_DEFAULT 20
55 static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
56 module_param(max_skb_slots, uint, 0444);
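/*
 * max_skb_slots can only be set when the module is loaded (for
 * example "modprobe xen-netback max_skb_slots=21"); with permission
 * 0444 it is read-only afterwards, typically exposed under
 * /sys/module/xen_netback/parameters/max_skb_slots.
 */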
57
58 typedef unsigned int pending_ring_idx_t;
59 #define INVALID_PENDING_RING_IDX (~0U)
60
61 struct pending_tx_info {
62         struct xen_netif_tx_request req; /* coalesced tx request */
63         struct xenvif *vif;
64         pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
65                                   * if it is head of one or more tx
66                                   * reqs
67                                   */
68 };
69
70 struct netbk_rx_meta {
71         int id;
72         int size;
73         int gso_size;
74 };
75
76 #define MAX_PENDING_REQS 256
77
78 /* Discriminate from any valid pending_idx value. */
79 #define INVALID_PENDING_IDX 0xFFFF
80
81 #define MAX_BUFFER_OFFSET PAGE_SIZE
82
83 /* extra field used in struct page */
84 union page_ext {
85         struct {
86 #if BITS_PER_LONG < 64
87 #define IDX_WIDTH   8
88 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
89                 unsigned int group:GROUP_WIDTH;
90                 unsigned int idx:IDX_WIDTH;
91 #else
92                 unsigned int group, idx;
93 #endif
94         } e;
95         void *mapping;
96 };
97
98 struct xen_netbk {
99         wait_queue_head_t wq;
100         struct task_struct *task;
101
102         struct sk_buff_head rx_queue;
103         struct sk_buff_head tx_queue;
104
105         struct timer_list net_timer;
106
107         struct page *mmap_pages[MAX_PENDING_REQS];
108
109         pending_ring_idx_t pending_prod;
110         pending_ring_idx_t pending_cons;
111         struct list_head net_schedule_list;
112
113         /* Protect the net_schedule_list in netif. */
114         spinlock_t net_schedule_list_lock;
115
116         atomic_t netfront_count;
117
118         struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
119         /* Coalescing tx requests before copying makes number of grant
120          * copy ops greater or equal to number of slots required. In
121          * worst case a tx request consumes 2 gnttab_copy.
122          */
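        /* Worked example of the factor of two: in
         * xen_netbk_get_requests() a tx request whose data would run
         * past the end of the destination page being filled is split
         * into one copy op that tops up that page and a second op
         * into the next freshly allocated page.
         */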
123         struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
124
125         u16 pending_ring[MAX_PENDING_REQS];
126
127         /*
128          * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
129          * head/fragment page uses 2 copy operations because it
130          * straddles two buffers in the frontend.
131          */
132         struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
133         struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
134 };
135
136 static struct xen_netbk *xen_netbk;
137 static int xen_netbk_group_nr;
138
139 /*
140  * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
141  * one or more merged tx requests, otherwise it is the continuation of
142  * previous tx request.
143  */
144 static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
145 {
146         return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
147 }
148
149 void xen_netbk_add_xenvif(struct xenvif *vif)
150 {
151         int i;
152         int min_netfront_count;
153         int min_group = 0;
154         struct xen_netbk *netbk;
155
156         min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
157         for (i = 0; i < xen_netbk_group_nr; i++) {
158                 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
159                 if (netfront_count < min_netfront_count) {
160                         min_group = i;
161                         min_netfront_count = netfront_count;
162                 }
163         }
164
165         netbk = &xen_netbk[min_group];
166
167         vif->netbk = netbk;
168         atomic_inc(&netbk->netfront_count);
169 }
170
171 void xen_netbk_remove_xenvif(struct xenvif *vif)
172 {
173         struct xen_netbk *netbk = vif->netbk;
174         vif->netbk = NULL;
175         atomic_dec(&netbk->netfront_count);
176 }
177
178 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
179                                   u8 status);
180 static void make_tx_response(struct xenvif *vif,
181                              struct xen_netif_tx_request *txp,
182                              s8       st);
183 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
184                                              u16      id,
185                                              s8       st,
186                                              u16      offset,
187                                              u16      size,
188                                              u16      flags);
189
190 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
191                                        u16 idx)
192 {
193         return page_to_pfn(netbk->mmap_pages[idx]);
194 }
195
196 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
197                                          u16 idx)
198 {
199         return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
200 }
201
202 /* extra field used in struct page */
203 static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
204                                 unsigned int idx)
205 {
206         unsigned int group = netbk - xen_netbk;
207         union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
208
209         BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
210         pg->mapping = ext.mapping;
211 }
212
213 static int get_page_ext(struct page *pg,
214                         unsigned int *pgroup, unsigned int *pidx)
215 {
216         union page_ext ext = { .mapping = pg->mapping };
217         struct xen_netbk *netbk;
218         unsigned int group, idx;
219
220         group = ext.e.group - 1;
221
222         if (group < 0 || group >= xen_netbk_group_nr)
223                 return 0;
224
225         netbk = &xen_netbk[group];
226
227         idx = ext.e.idx;
228
229         if ((idx < 0) || (idx >= MAX_PENDING_REQS))
230                 return 0;
231
232         if (netbk->mmap_pages[idx] != pg)
233                 return 0;
234
235         *pgroup = group;
236         *pidx = idx;
237
238         return 1;
239 }
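/*
 * Note the +1/-1 bias on the group in set_page_ext()/get_page_ext():
 * a page netback does not own has a NULL mapping, which decodes here
 * to group = (unsigned)-1 and is rejected by the range check above,
 * so ordinary pages are never mistaken for netback's.
 */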
240
241 /*
242  * This is the amount of packet we copy rather than map, so that the
243  * guest can't fiddle with the contents of the headers while we do
244  * packet processing on them (netfilter, routing, etc).
245  */
246 #define PKT_PROT_LEN    (ETH_HLEN + \
247                          VLAN_HLEN + \
248                          sizeof(struct iphdr) + MAX_IPOPTLEN + \
249                          sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
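/*
 * With the usual values of these constants (ETH_HLEN 14, VLAN_HLEN 4,
 * sizeof(struct iphdr) 20, MAX_IPOPTLEN 40, sizeof(struct tcphdr) 20,
 * MAX_TCP_OPTION_SPACE 40) this works out to 138 bytes of header
 * copied into the backend's own skb.
 */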
250
251 static u16 frag_get_pending_idx(skb_frag_t *frag)
252 {
253         return (u16)frag->page_offset;
254 }
255
256 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
257 {
258         frag->page_offset = pending_idx;
259 }
260
261 static inline pending_ring_idx_t pending_index(unsigned i)
262 {
263         return i & (MAX_PENDING_REQS-1);
264 }
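/*
 * The mask above relies on MAX_PENDING_REQS (256) being a power of
 * two: pending_prod and pending_cons are free-running counters and
 * only their low bits are used to index pending_ring[].
 */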
265
266 static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
267 {
268         return MAX_PENDING_REQS -
269                 netbk->pending_prod + netbk->pending_cons;
270 }
271
272 static void xen_netbk_kick_thread(struct xen_netbk *netbk)
273 {
274         wake_up(&netbk->wq);
275 }
276
277 static int max_required_rx_slots(struct xenvif *vif)
278 {
279         int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
280
281         /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
282         if (vif->can_sg || vif->gso || vif->gso_prefix)
283                 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
284
285         return max;
286 }
287
288 int xen_netbk_rx_ring_full(struct xenvif *vif)
289 {
290         RING_IDX peek   = vif->rx_req_cons_peek;
291         RING_IDX needed = max_required_rx_slots(vif);
292
293         return ((vif->rx.sring->req_prod - peek) < needed) ||
294                ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
295 }
296
297 int xen_netbk_must_stop_queue(struct xenvif *vif)
298 {
299         if (!xen_netbk_rx_ring_full(vif))
300                 return 0;
301
302         vif->rx.sring->req_event = vif->rx_req_cons_peek +
303                 max_required_rx_slots(vif);
304         mb(); /* request notification /then/ check the queue */
305
306         return xen_netbk_rx_ring_full(vif);
307 }
308
309 /*
310  * Returns true if we should start a new receive buffer instead of
311  * adding 'size' bytes to a buffer which currently contains 'offset'
312  * bytes.
313  */
314 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
315 {
316         /* simple case: we have completely filled the current buffer. */
317         if (offset == MAX_BUFFER_OFFSET)
318                 return true;
319
320         /*
321          * complex case: start a fresh buffer if the current frag
322          * would overflow the current buffer but only if:
323          *     (i)   this frag would fit completely in the next buffer
324          * and (ii)  there is already some data in the current buffer
325          * and (iii) this is not the head buffer.
326          *
327          * Where:
328          * - (i) stops us splitting a frag into two copies
329          *   unless the frag is too large for a single buffer.
330          * - (ii) stops us from leaving a buffer pointlessly empty.
331          * - (iii) stops us leaving the first buffer
332          *   empty. Strictly speaking this is already covered
333          *   by (ii) but is explicitly checked because
334          *   netfront relies on the first buffer being
335          *   non-empty and can crash otherwise.
336          *
337          * This means we will effectively linearise small
338          * frags, but we do not needlessly split large buffers
339          * into multiple copies; large frags tend to get their
340          * own buffers as before.
341          */
342         if ((offset + size > MAX_BUFFER_OFFSET) &&
343             (size <= MAX_BUFFER_OFFSET) && offset && !head)
344                 return true;
345
346         return false;
347 }
348
349 /*
350  * Figure out how many ring slots we're going to need to send @skb to
351  * the guest. This function is essentially a dry run of
352  * netbk_gop_frag_copy.
353  */
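/*
 * For example, assuming 4 KiB pages: an skb with a 3000 byte linear
 * area and a single 2000 byte frag needs two slots. The linear area
 * fills the first buffer up to copy_off 3000, and the frag cannot be
 * added without overflowing it, so start_new_rx_buffer() forces a
 * second buffer.
 */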
354 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
355 {
356         unsigned int count;
357         int i, copy_off;
358
359         count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
360
361         copy_off = skb_headlen(skb) % PAGE_SIZE;
362
363         if (skb_shinfo(skb)->gso_size)
364                 count++;
365
366         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
367                 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
368                 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
369                 unsigned long bytes;
370
371                 offset &= ~PAGE_MASK;
372
373                 while (size > 0) {
374                         BUG_ON(offset >= PAGE_SIZE);
375                         BUG_ON(copy_off > MAX_BUFFER_OFFSET);
376
377                         bytes = PAGE_SIZE - offset;
378
379                         if (bytes > size)
380                                 bytes = size;
381
382                         if (start_new_rx_buffer(copy_off, bytes, 0)) {
383                                 count++;
384                                 copy_off = 0;
385                         }
386
387                         if (copy_off + bytes > MAX_BUFFER_OFFSET)
388                                 bytes = MAX_BUFFER_OFFSET - copy_off;
389
390                         copy_off += bytes;
391
392                         offset += bytes;
393                         size -= bytes;
394
395                         if (offset == PAGE_SIZE)
396                                 offset = 0;
397                 }
398         }
399         return count;
400 }
401
402 struct netrx_pending_operations {
403         unsigned copy_prod, copy_cons;
404         unsigned meta_prod, meta_cons;
405         struct gnttab_copy *copy;
406         struct netbk_rx_meta *meta;
407         int copy_off;
408         grant_ref_t copy_gref;
409 };
410
411 static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
412                                                 struct netrx_pending_operations *npo)
413 {
414         struct netbk_rx_meta *meta;
415         struct xen_netif_rx_request *req;
416
417         req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
418
419         meta = npo->meta + npo->meta_prod++;
420         meta->gso_size = 0;
421         meta->size = 0;
422         meta->id = req->id;
423
424         npo->copy_off = 0;
425         npo->copy_gref = req->gref;
426
427         return meta;
428 }
429
430 /*
431  * Set up the grant copy operations for this fragment of the skb
432  * being transmitted to the frontend.
433  */
434 static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
435                                 struct netrx_pending_operations *npo,
436                                 struct page *page, unsigned long size,
437                                 unsigned long offset, int *head)
438 {
439         struct gnttab_copy *copy_gop;
440         struct netbk_rx_meta *meta;
441         /*
442          * These variables are used iff get_page_ext returns true,
443          * in which case they are guaranteed to be initialized.
444          */
445         unsigned int uninitialized_var(group), uninitialized_var(idx);
446         int foreign = get_page_ext(page, &group, &idx);
447         unsigned long bytes;
448
449         /* Data must not cross a page boundary. */
450         BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
451
452         meta = npo->meta + npo->meta_prod - 1;
453
454         /* Skip unused frames from start of page */
455         page += offset >> PAGE_SHIFT;
456         offset &= ~PAGE_MASK;
457
458         while (size > 0) {
459                 BUG_ON(offset >= PAGE_SIZE);
460                 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
461
462                 bytes = PAGE_SIZE - offset;
463
464                 if (bytes > size)
465                         bytes = size;
466
467                 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
468                         /*
469                          * Netfront requires there to be some data in the head
470                          * buffer.
471                          */
472                         BUG_ON(*head);
473
474                         meta = get_next_rx_buffer(vif, npo);
475                 }
476
477                 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
478                         bytes = MAX_BUFFER_OFFSET - npo->copy_off;
479
480                 copy_gop = npo->copy + npo->copy_prod++;
481                 copy_gop->flags = GNTCOPY_dest_gref;
482                 if (foreign) {
483                         struct xen_netbk *netbk = &xen_netbk[group];
484                         struct pending_tx_info *src_pend;
485
486                         src_pend = &netbk->pending_tx_info[idx];
487
488                         copy_gop->source.domid = src_pend->vif->domid;
489                         copy_gop->source.u.ref = src_pend->req.gref;
490                         copy_gop->flags |= GNTCOPY_source_gref;
491                 } else {
492                         void *vaddr = page_address(page);
493                         copy_gop->source.domid = DOMID_SELF;
494                         copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
495                 }
496                 copy_gop->source.offset = offset;
497                 copy_gop->dest.domid = vif->domid;
498
499                 copy_gop->dest.offset = npo->copy_off;
500                 copy_gop->dest.u.ref = npo->copy_gref;
501                 copy_gop->len = bytes;
502
503                 npo->copy_off += bytes;
504                 meta->size += bytes;
505
506                 offset += bytes;
507                 size -= bytes;
508
509                 /* Next frame */
510                 if (offset == PAGE_SIZE && size) {
511                         BUG_ON(!PageCompound(page));
512                         page++;
513                         offset = 0;
514                 }
515
516                 /* Leave a gap for the GSO descriptor. */
517                 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
518                         vif->rx.req_cons++;
519
520                 *head = 0; /* There must be something in this buffer now. */
521
522         }
523 }
524
525 /*
526  * Prepare an SKB to be transmitted to the frontend.
527  *
528  * This function is responsible for allocating grant operations, meta
529  * structures, etc.
530  *
531  * It returns the number of meta structures consumed. The number of
532  * ring slots used is always equal to the number of meta slots used
533  * plus the number of GSO descriptors used. Currently, we use either
534  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
535  * frontend-side LRO).
536  */
537 static int netbk_gop_skb(struct sk_buff *skb,
538                          struct netrx_pending_operations *npo)
539 {
540         struct xenvif *vif = netdev_priv(skb->dev);
541         int nr_frags = skb_shinfo(skb)->nr_frags;
542         int i;
543         struct xen_netif_rx_request *req;
544         struct netbk_rx_meta *meta;
545         unsigned char *data;
546         int head = 1;
547         int old_meta_prod;
548
549         old_meta_prod = npo->meta_prod;
550
551         /* Set up a GSO prefix descriptor, if necessary */
552         if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
553                 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
554                 meta = npo->meta + npo->meta_prod++;
555                 meta->gso_size = skb_shinfo(skb)->gso_size;
556                 meta->size = 0;
557                 meta->id = req->id;
558         }
559
560         req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
561         meta = npo->meta + npo->meta_prod++;
562
563         if (!vif->gso_prefix)
564                 meta->gso_size = skb_shinfo(skb)->gso_size;
565         else
566                 meta->gso_size = 0;
567
568         meta->size = 0;
569         meta->id = req->id;
570         npo->copy_off = 0;
571         npo->copy_gref = req->gref;
572
573         data = skb->data;
574         while (data < skb_tail_pointer(skb)) {
575                 unsigned int offset = offset_in_page(data);
576                 unsigned int len = PAGE_SIZE - offset;
577
578                 if (data + len > skb_tail_pointer(skb))
579                         len = skb_tail_pointer(skb) - data;
580
581                 netbk_gop_frag_copy(vif, skb, npo,
582                                     virt_to_page(data), len, offset, &head);
583                 data += len;
584         }
585
586         for (i = 0; i < nr_frags; i++) {
587                 netbk_gop_frag_copy(vif, skb, npo,
588                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
589                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
590                                     skb_shinfo(skb)->frags[i].page_offset,
591                                     &head);
592         }
593
594         return npo->meta_prod - old_meta_prod;
595 }
596
597 /*
598  * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
599  * used to set up the operations on the top of
600  * netrx_pending_operations, which have since been done.  Check that
601  * they didn't give any errors and advance over them.
602  */
603 static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
604                            struct netrx_pending_operations *npo)
605 {
606         struct gnttab_copy     *copy_op;
607         int status = XEN_NETIF_RSP_OKAY;
608         int i;
609
610         for (i = 0; i < nr_meta_slots; i++) {
611                 copy_op = npo->copy + npo->copy_cons++;
612                 if (copy_op->status != GNTST_okay) {
613                         netdev_dbg(vif->dev,
614                                    "Bad status %d from copy to DOM%d.\n",
615                                    copy_op->status, vif->domid);
616                         status = XEN_NETIF_RSP_ERROR;
617                 }
618         }
619
620         return status;
621 }
622
623 static void netbk_add_frag_responses(struct xenvif *vif, int status,
624                                      struct netbk_rx_meta *meta,
625                                      int nr_meta_slots)
626 {
627         int i;
628         unsigned long offset;
629
630         /* No fragments used */
631         if (nr_meta_slots <= 1)
632                 return;
633
634         nr_meta_slots--;
635
636         for (i = 0; i < nr_meta_slots; i++) {
637                 int flags;
638                 if (i == nr_meta_slots - 1)
639                         flags = 0;
640                 else
641                         flags = XEN_NETRXF_more_data;
642
643                 offset = 0;
644                 make_rx_response(vif, meta[i].id, status, offset,
645                                  meta[i].size, flags);
646         }
647 }
648
649 struct skb_cb_overlay {
650         int meta_slots_used;
651 };
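/*
 * skb->cb is used as scratch space while an skb sits on the internal
 * rx queue: xen_netbk_rx_action() stashes the slot count returned by
 * netbk_gop_skb() here so that the response loop knows how many meta
 * slots to consume for each skb.
 */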
652
653 static void xen_netbk_rx_action(struct xen_netbk *netbk)
654 {
655         struct xenvif *vif = NULL, *tmp;
656         s8 status;
657         u16 irq, flags;
658         struct xen_netif_rx_response *resp;
659         struct sk_buff_head rxq;
660         struct sk_buff *skb;
661         LIST_HEAD(notify);
662         int ret;
663         int nr_frags;
664         int count;
665         unsigned long offset;
666         struct skb_cb_overlay *sco;
667
668         struct netrx_pending_operations npo = {
669                 .copy  = netbk->grant_copy_op,
670                 .meta  = netbk->meta,
671         };
672
673         skb_queue_head_init(&rxq);
674
675         count = 0;
676
677         while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
678                 vif = netdev_priv(skb->dev);
679                 nr_frags = skb_shinfo(skb)->nr_frags;
680
681                 sco = (struct skb_cb_overlay *)skb->cb;
682                 sco->meta_slots_used = netbk_gop_skb(skb, &npo);
683
684                 count += nr_frags + 1;
685
686                 __skb_queue_tail(&rxq, skb);
687
688                 /* Filled the batch queue? */
689                 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
690                 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
691                         break;
692         }
693
694         BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
695
696         if (!npo.copy_prod)
697                 return;
698
699         BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
700         gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
701
702         while ((skb = __skb_dequeue(&rxq)) != NULL) {
703                 sco = (struct skb_cb_overlay *)skb->cb;
704
705                 vif = netdev_priv(skb->dev);
706
707                 if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
708                         resp = RING_GET_RESPONSE(&vif->rx,
709                                                 vif->rx.rsp_prod_pvt++);
710
711                         resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
712
713                         resp->offset = netbk->meta[npo.meta_cons].gso_size;
714                         resp->id = netbk->meta[npo.meta_cons].id;
715                         resp->status = sco->meta_slots_used;
716
717                         npo.meta_cons++;
718                         sco->meta_slots_used--;
719                 }
720
721
722                 vif->dev->stats.tx_bytes += skb->len;
723                 vif->dev->stats.tx_packets++;
724
725                 status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
726
727                 if (sco->meta_slots_used == 1)
728                         flags = 0;
729                 else
730                         flags = XEN_NETRXF_more_data;
731
732                 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
733                         flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
734                 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
735                         /* remote but checksummed. */
736                         flags |= XEN_NETRXF_data_validated;
737
738                 offset = 0;
739                 resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
740                                         status, offset,
741                                         netbk->meta[npo.meta_cons].size,
742                                         flags);
743
744                 if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
745                         struct xen_netif_extra_info *gso =
746                                 (struct xen_netif_extra_info *)
747                                 RING_GET_RESPONSE(&vif->rx,
748                                                   vif->rx.rsp_prod_pvt++);
749
750                         resp->flags |= XEN_NETRXF_extra_info;
751
752                         gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
753                         gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
754                         gso->u.gso.pad = 0;
755                         gso->u.gso.features = 0;
756
757                         gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
758                         gso->flags = 0;
759                 }
760
761                 netbk_add_frag_responses(vif, status,
762                                          netbk->meta + npo.meta_cons + 1,
763                                          sco->meta_slots_used);
764
765                 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
766                 irq = vif->irq;
767                 if (ret && list_empty(&vif->notify_list))
768                         list_add_tail(&vif->notify_list, &notify);
769
770                 xenvif_notify_tx_completion(vif);
771
772                 xenvif_put(vif);
773                 npo.meta_cons += sco->meta_slots_used;
774                 dev_kfree_skb(skb);
775         }
776
777         list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
778                 notify_remote_via_irq(vif->irq);
779                 list_del_init(&vif->notify_list);
780         }
781
782         /* More work to do? */
783         if (!skb_queue_empty(&netbk->rx_queue) &&
784                         !timer_pending(&netbk->net_timer))
785                 xen_netbk_kick_thread(netbk);
786 }
787
788 void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
789 {
790         struct xen_netbk *netbk = vif->netbk;
791
792         skb_queue_tail(&netbk->rx_queue, skb);
793
794         xen_netbk_kick_thread(netbk);
795 }
796
797 static void xen_netbk_alarm(unsigned long data)
798 {
799         struct xen_netbk *netbk = (struct xen_netbk *)data;
800         xen_netbk_kick_thread(netbk);
801 }
802
803 static int __on_net_schedule_list(struct xenvif *vif)
804 {
805         return !list_empty(&vif->schedule_list);
806 }
807
808 /* Must be called with net_schedule_list_lock held */
809 static void remove_from_net_schedule_list(struct xenvif *vif)
810 {
811         if (likely(__on_net_schedule_list(vif))) {
812                 list_del_init(&vif->schedule_list);
813                 xenvif_put(vif);
814         }
815 }
816
817 static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
818 {
819         struct xenvif *vif = NULL;
820
821         spin_lock_irq(&netbk->net_schedule_list_lock);
822         if (list_empty(&netbk->net_schedule_list))
823                 goto out;
824
825         vif = list_first_entry(&netbk->net_schedule_list,
826                                struct xenvif, schedule_list);
827         if (!vif)
828                 goto out;
829
830         xenvif_get(vif);
831
832         remove_from_net_schedule_list(vif);
833 out:
834         spin_unlock_irq(&netbk->net_schedule_list_lock);
835         return vif;
836 }
837
838 void xen_netbk_schedule_xenvif(struct xenvif *vif)
839 {
840         unsigned long flags;
841         struct xen_netbk *netbk = vif->netbk;
842
843         if (__on_net_schedule_list(vif))
844                 goto kick;
845
846         spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
847         if (!__on_net_schedule_list(vif) &&
848             likely(xenvif_schedulable(vif))) {
849                 list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
850                 xenvif_get(vif);
851         }
852         spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
853
854 kick:
855         smp_mb();
856         if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
857             !list_empty(&netbk->net_schedule_list))
858                 xen_netbk_kick_thread(netbk);
859 }
860
861 void xen_netbk_deschedule_xenvif(struct xenvif *vif)
862 {
863         struct xen_netbk *netbk = vif->netbk;
864         spin_lock_irq(&netbk->net_schedule_list_lock);
865         remove_from_net_schedule_list(vif);
866         spin_unlock_irq(&netbk->net_schedule_list_lock);
867 }
868
869 void xen_netbk_check_rx_xenvif(struct xenvif *vif)
870 {
871         int more_to_do;
872
873         RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
874
875         if (more_to_do)
876                 xen_netbk_schedule_xenvif(vif);
877 }
878
879 static void tx_add_credit(struct xenvif *vif)
880 {
881         unsigned long max_burst, max_credit;
882
883         /*
884          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
885          * Otherwise the interface can seize up due to insufficient credit.
886          */
887         max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
888         max_burst = min(max_burst, 131072UL);
889         max_burst = max(max_burst, vif->credit_bytes);
890
891         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
892         max_credit = vif->remaining_credit + vif->credit_bytes;
893         if (max_credit < vif->remaining_credit)
894                 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
895
896         vif->remaining_credit = min(max_credit, max_burst);
897 }
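/*
 * For example, with credit_bytes = 100000 and a 60000 byte request at
 * the head of the ring, max_burst = 100000, so remaining_credit is
 * replenished to at most 100000 no matter how long the vif has been
 * throttled; this bounds the burst a starved interface can send.
 */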
898
899 static void tx_credit_callback(unsigned long data)
900 {
901         struct xenvif *vif = (struct xenvif *)data;
902         tx_add_credit(vif);
903         xen_netbk_check_rx_xenvif(vif);
904 }
905
906 static void netbk_tx_err(struct xenvif *vif,
907                          struct xen_netif_tx_request *txp, RING_IDX end)
908 {
909         RING_IDX cons = vif->tx.req_cons;
910
911         do {
912                 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
913                 if (cons == end)
914                         break;
915                 txp = RING_GET_REQUEST(&vif->tx, cons++);
916         } while (1);
917         vif->tx.req_cons = cons;
918         xen_netbk_check_rx_xenvif(vif);
919         xenvif_put(vif);
920 }
921
922 static void netbk_fatal_tx_err(struct xenvif *vif)
923 {
924         netdev_err(vif->dev, "fatal error; disabling device\n");
925         xenvif_carrier_off(vif);
926         xenvif_put(vif);
927 }
928
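/*
 * Count how many ring slots the frontend used for this packet beyond
 * the first request. Returns 0 if the first request carries the whole
 * packet, the number of follow-on slots on success, or a negative
 * errno if the packet was dropped or the vif was declared broken (in
 * which case the error responses have already been generated or the
 * carrier has been turned off).
 */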
929 static int netbk_count_requests(struct xenvif *vif,
930                                 struct xen_netif_tx_request *first,
931                                 struct xen_netif_tx_request *txp,
932                                 int work_to_do)
933 {
934         RING_IDX cons = vif->tx.req_cons;
935         int slots = 0;
936         int drop_err = 0;
937         int more_data;
938
939         if (!(first->flags & XEN_NETTXF_more_data))
940                 return 0;
941
942         do {
943                 struct xen_netif_tx_request dropped_tx = { 0 };
944
945                 if (slots >= work_to_do) {
946                         netdev_err(vif->dev,
947                                    "Asked for %d slots but exceeds this limit\n",
948                                    work_to_do);
949                         netbk_fatal_tx_err(vif);
950                         return -ENODATA;
951                 }
952
953                 /* This guest is really using too many slots and
954                  * is considered malicious.
955                  */
956                 if (unlikely(slots >= max_skb_slots)) {
957                         netdev_err(vif->dev,
958                                    "Malicious frontend using %d slots, threshold %u\n",
959                                    slots, max_skb_slots);
960                         netbk_fatal_tx_err(vif);
961                         return -E2BIG;
962                 }
963
964                 /* The Xen network protocol had an implicit dependency on
965                  * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
966                  * historical MAX_SKB_FRAGS value of 18 to honor the same
967                  * behavior as before. Any packet using more than 18
968                  * slots but fewer than max_skb_slots slots is dropped.
969                  */
970                 if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
971                         if (net_ratelimit())
972                                 netdev_dbg(vif->dev,
973                                            "Too many slots (%d) exceeding limit (%d), dropping packet\n",
974                                            slots, XEN_NETIF_NR_SLOTS_MIN);
975                         drop_err = -E2BIG;
976                 }
977
978                 if (drop_err)
979                         txp = &dropped_tx;
980
981                 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
982                        sizeof(*txp));
983
984                 /* If the guest submitted a frame >= 64 KiB then
985                  * first->size overflowed and following slots will
986                  * appear to be larger than the frame.
987                  *
988                  * This cannot be a fatal error as there are buggy
989                  * frontends that do this.
990                  *
991                  * Consume all slots and drop the packet.
992                  */
993                 if (!drop_err && txp->size > first->size) {
994                         if (net_ratelimit())
995                                 netdev_dbg(vif->dev,
996                                            "Invalid tx request, slot size %u > remaining size %u\n",
997                                            txp->size, first->size);
998                         drop_err = -EIO;
999                 }
1000
1001                 first->size -= txp->size;
1002                 slots++;
1003
1004                 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
1005                         netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
1006                                  txp->offset, txp->size);
1007                         netbk_fatal_tx_err(vif);
1008                         return -EINVAL;
1009                 }
1010
1011                 more_data = txp->flags & XEN_NETTXF_more_data;
1012
1013                 if (!drop_err)
1014                         txp++;
1015
1016         } while (more_data);
1017
1018         if (drop_err) {
1019                 netbk_tx_err(vif, first, cons + slots);
1020                 return drop_err;
1021         }
1022
1023         return slots;
1024 }
1025
1026 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
1027                                          u16 pending_idx)
1028 {
1029         struct page *page;
1030         page = alloc_page(GFP_KERNEL|__GFP_COLD);
1031         if (!page)
1032                 return NULL;
1033         set_page_ext(page, netbk, pending_idx);
1034         netbk->mmap_pages[pending_idx] = page;
1035         return page;
1036 }
1037
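/*
 * Build the grant copy operations for the frag slots of an skb,
 * coalescing up to PAGE_SIZE worth of frontend tx requests into each
 * freshly allocated backend page. Only the head request of each
 * coalesced group keeps a valid 'head' index; the rest are marked
 * INVALID_PENDING_RING_IDX. Returns the next free slot in the gop
 * array, or NULL (after unwinding) if a page cannot be allocated.
 */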
1038 static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1039                                                   struct xenvif *vif,
1040                                                   struct sk_buff *skb,
1041                                                   struct xen_netif_tx_request *txp,
1042                                                   struct gnttab_copy *gop)
1043 {
1044         struct skb_shared_info *shinfo = skb_shinfo(skb);
1045         skb_frag_t *frags = shinfo->frags;
1046         u16 pending_idx = *((u16 *)skb->data);
1047         u16 head_idx = 0;
1048         int slot, start;
1049         struct page *page;
1050         pending_ring_idx_t index, start_idx = 0;
1051         uint16_t dst_offset;
1052         unsigned int nr_slots;
1053         struct pending_tx_info *first = NULL;
1054
1055         /* At this point shinfo->nr_frags is in fact the number of
1056          * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
1057          */
1058         nr_slots = shinfo->nr_frags;
1059
1060         /* Skip first skb fragment if it is on same page as header fragment. */
1061         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1062
1063         /* Coalesce tx requests; at this point the packet passed in
1064          * should be <= 64K. Any packets larger than 64K have been
1065          * handled in netbk_count_requests().
1066          */
1067         for (shinfo->nr_frags = slot = start; slot < nr_slots;
1068              shinfo->nr_frags++) {
1069                 struct pending_tx_info *pending_tx_info =
1070                         netbk->pending_tx_info;
1071
1072                 page = alloc_page(GFP_KERNEL|__GFP_COLD);
1073                 if (!page)
1074                         goto err;
1075
1076                 dst_offset = 0;
1077                 first = NULL;
1078                 while (dst_offset < PAGE_SIZE && slot < nr_slots) {
1079                         gop->flags = GNTCOPY_source_gref;
1080
1081                         gop->source.u.ref = txp->gref;
1082                         gop->source.domid = vif->domid;
1083                         gop->source.offset = txp->offset;
1084
1085                         gop->dest.domid = DOMID_SELF;
1086
1087                         gop->dest.offset = dst_offset;
1088                         gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1089
1090                         if (dst_offset + txp->size > PAGE_SIZE) {
1091                                 /* This page can only merge a portion
1092                                  * of the tx request. Do not increment any
1093                                  * pointer / counter here. The txp
1094                                  * will be dealt with in future
1095                                  * rounds, eventually hitting the
1096                                  * `else` branch.
1097                                  */
1098                                 gop->len = PAGE_SIZE - dst_offset;
1099                                 txp->offset += gop->len;
1100                                 txp->size -= gop->len;
1101                                 dst_offset += gop->len; /* quit loop */
1102                         } else {
1103                                 /* This tx request can be merged in the page */
1104                                 gop->len = txp->size;
1105                                 dst_offset += gop->len;
1106
1107                                 index = pending_index(netbk->pending_cons++);
1108
1109                                 pending_idx = netbk->pending_ring[index];
1110
1111                                 memcpy(&pending_tx_info[pending_idx].req, txp,
1112                                        sizeof(*txp));
1113                                 xenvif_get(vif);
1114
1115                                 pending_tx_info[pending_idx].vif = vif;
1116
1117                                 /* Poison these fields; the corresponding
1118                                  * fields for the head tx req will be set
1119                                  * to correct values after the loop.
1120                                  */
1121                                 netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1122                                 pending_tx_info[pending_idx].head =
1123                                         INVALID_PENDING_RING_IDX;
1124
1125                                 if (!first) {
1126                                         first = &pending_tx_info[pending_idx];
1127                                         start_idx = index;
1128                                         head_idx = pending_idx;
1129                                 }
1130
1131                                 txp++;
1132                                 slot++;
1133                         }
1134
1135                         gop++;
1136                 }
1137
1138                 first->req.offset = 0;
1139                 first->req.size = dst_offset;
1140                 first->head = start_idx;
1141                 set_page_ext(page, netbk, head_idx);
1142                 netbk->mmap_pages[head_idx] = page;
1143                 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
1144         }
1145
1146         BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1147
1148         return gop;
1149 err:
1150         /* Unwind, freeing all pages and sending error responses. */
1151         while (shinfo->nr_frags-- > start) {
1152                 xen_netbk_idx_release(netbk,
1153                                 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1154                                 XEN_NETIF_RSP_ERROR);
1155         }
1156         /* The head too, if necessary. */
1157         if (start)
1158                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1159
1160         return NULL;
1161 }
1162
1163 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1164                                   struct sk_buff *skb,
1165                                   struct gnttab_copy **gopp)
1166 {
1167         struct gnttab_copy *gop = *gopp;
1168         u16 pending_idx = *((u16 *)skb->data);
1169         struct skb_shared_info *shinfo = skb_shinfo(skb);
1170         struct pending_tx_info *tx_info;
1171         int nr_frags = shinfo->nr_frags;
1172         int i, err, start;
1173         u16 peek; /* peek into next tx request */
1174
1175         /* Check status of header. */
1176         err = gop->status;
1177         if (unlikely(err))
1178                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1179
1180         /* Skip first skb fragment if it is on same page as header fragment. */
1181         start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1182
1183         for (i = start; i < nr_frags; i++) {
1184                 int j, newerr;
1185                 pending_ring_idx_t head;
1186
1187                 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1188                 tx_info = &netbk->pending_tx_info[pending_idx];
1189                 head = tx_info->head;
1190
1191                 /* Check error status: if okay then remember grant handle. */
1192                 do {
1193                         newerr = (++gop)->status;
1194                         if (newerr)
1195                                 break;
1196                         peek = netbk->pending_ring[pending_index(++head)];
1197                 } while (!pending_tx_is_head(netbk, peek));
1198
1199                 if (likely(!newerr)) {
1200                         /* Had a previous error? Invalidate this fragment. */
1201                         if (unlikely(err))
1202                                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1203                         continue;
1204                 }
1205
1206                 /* Error on this fragment: respond to client with an error. */
1207                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1208
1209                 /* Not the first error? Preceding frags already invalidated. */
1210                 if (err)
1211                         continue;
1212
1213                 /* First error: invalidate header and preceding fragments. */
1214                 pending_idx = *((u16 *)skb->data);
1215                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1216                 for (j = start; j < i; j++) {
1217                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1218                         xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1219                 }
1220
1221                 /* Remember the error: invalidate all subsequent fragments. */
1222                 err = newerr;
1223         }
1224
1225         *gopp = gop + 1;
1226         return err;
1227 }
1228
1229 static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1230 {
1231         struct skb_shared_info *shinfo = skb_shinfo(skb);
1232         int nr_frags = shinfo->nr_frags;
1233         int i;
1234
1235         for (i = 0; i < nr_frags; i++) {
1236                 skb_frag_t *frag = shinfo->frags + i;
1237                 struct xen_netif_tx_request *txp;
1238                 struct page *page;
1239                 u16 pending_idx;
1240
1241                 pending_idx = frag_get_pending_idx(frag);
1242
1243                 txp = &netbk->pending_tx_info[pending_idx].req;
1244                 page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1245                 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1246                 skb->len += txp->size;
1247                 skb->data_len += txp->size;
1248                 skb->truesize += txp->size;
1249
1250                 /* Take an extra reference to offset xen_netbk_idx_release */
1251                 get_page(netbk->mmap_pages[pending_idx]);
1252                 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1253         }
1254 }
1255
1256 static int xen_netbk_get_extras(struct xenvif *vif,
1257                                 struct xen_netif_extra_info *extras,
1258                                 int work_to_do)
1259 {
1260         struct xen_netif_extra_info extra;
1261         RING_IDX cons = vif->tx.req_cons;
1262
1263         do {
1264                 if (unlikely(work_to_do-- <= 0)) {
1265                         netdev_err(vif->dev, "Missing extra info\n");
1266                         netbk_fatal_tx_err(vif);
1267                         return -EBADR;
1268                 }
1269
1270                 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1271                        sizeof(extra));
1272                 if (unlikely(!extra.type ||
1273                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1274                         vif->tx.req_cons = ++cons;
1275                         netdev_err(vif->dev,
1276                                    "Invalid extra type: %d\n", extra.type);
1277                         netbk_fatal_tx_err(vif);
1278                         return -EINVAL;
1279                 }
1280
1281                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1282                 vif->tx.req_cons = ++cons;
1283         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1284
1285         return work_to_do;
1286 }
1287
1288 static int netbk_set_skb_gso(struct xenvif *vif,
1289                              struct sk_buff *skb,
1290                              struct xen_netif_extra_info *gso)
1291 {
1292         if (!gso->u.gso.size) {
1293                 netdev_err(vif->dev, "GSO size must not be zero.\n");
1294                 netbk_fatal_tx_err(vif);
1295                 return -EINVAL;
1296         }
1297
1298         /* Currently only TCPv4 segmentation offload (S.O.) is supported. */
1299         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1300                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1301                 netbk_fatal_tx_err(vif);
1302                 return -EINVAL;
1303         }
1304
1305         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1306         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1307
1308         /* Header must be checked, and gso_segs computed. */
1309         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1310         skb_shinfo(skb)->gso_segs = 0;
1311
1312         return 0;
1313 }
1314
1315 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1316 {
1317         struct iphdr *iph;
1318         int err = -EPROTO;
1319         int recalculate_partial_csum = 0;
1320
1321         /*
1322          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1323          * peers can fail to set NETRXF_csum_blank when sending a GSO
1324          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1325          * recalculate the partial checksum.
1326          */
1327         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1328                 vif->rx_gso_checksum_fixup++;
1329                 skb->ip_summed = CHECKSUM_PARTIAL;
1330                 recalculate_partial_csum = 1;
1331         }
1332
1333         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1334         if (skb->ip_summed != CHECKSUM_PARTIAL)
1335                 return 0;
1336
1337         if (skb->protocol != htons(ETH_P_IP))
1338                 goto out;
1339
1340         iph = (void *)skb->data;
1341         switch (iph->protocol) {
1342         case IPPROTO_TCP:
1343                 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1344                                           offsetof(struct tcphdr, check)))
1345                         goto out;
1346
1347                 if (recalculate_partial_csum) {
1348                         struct tcphdr *tcph = tcp_hdr(skb);
1349                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1350                                                          skb->len - iph->ihl*4,
1351                                                          IPPROTO_TCP, 0);
1352                 }
1353                 break;
1354         case IPPROTO_UDP:
1355                 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1356                                           offsetof(struct udphdr, check)))
1357                         goto out;
1358
1359                 if (recalculate_partial_csum) {
1360                         struct udphdr *udph = udp_hdr(skb);
1361                         udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1362                                                          skb->len - iph->ihl*4,
1363                                                          IPPROTO_UDP, 0);
1364                 }
1365                 break;
1366         default:
1367                 if (net_ratelimit())
1368                         netdev_err(vif->dev,
1369                                    "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1370                                    iph->protocol);
1371                 goto out;
1372         }
1373
1374         err = 0;
1375
1376 out:
1377         return err;
1378 }
1379
1380 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1381 {
1382         unsigned long now = jiffies;
1383         unsigned long next_credit =
1384                 vif->credit_timeout.expires +
1385                 msecs_to_jiffies(vif->credit_usec / 1000);
1386
1387         /* Timer could already be pending in rare cases. */
1388         if (timer_pending(&vif->credit_timeout))
1389                 return true;
1390
1391         /* Passed the point where we can replenish credit? */
1392         if (time_after_eq(now, next_credit)) {
1393                 vif->credit_timeout.expires = now;
1394                 tx_add_credit(vif);
1395         }
1396
1397         /* Still too big to send right now? Set a callback. */
1398         if (size > vif->remaining_credit) {
1399                 vif->credit_timeout.data     =
1400                         (unsigned long)vif;
1401                 vif->credit_timeout.function =
1402                         tx_credit_callback;
1403                 mod_timer(&vif->credit_timeout,
1404                           next_credit);
1405
1406                 return true;
1407         }
1408
1409         return false;
1410 }
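/*
 * Credit scheduling in short: a request larger than remaining_credit
 * first tries to replenish via tx_add_credit() once the current
 * period has elapsed; if it is still too big, credit_timeout is armed
 * so that tx_credit_callback() tops the credit up at next_credit and
 * re-checks the tx ring for pending work.
 */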
1411
1412 static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1413 {
1414         struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1415         struct sk_buff *skb;
1416         int ret;
1417
1418         while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1419                 < MAX_PENDING_REQS) &&
1420                 !list_empty(&netbk->net_schedule_list)) {
1421                 struct xenvif *vif;
1422                 struct xen_netif_tx_request txreq;
1423                 struct xen_netif_tx_request txfrags[XEN_NETIF_NR_SLOTS_MIN];
1424                 struct page *page;
1425                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1426                 u16 pending_idx;
1427                 RING_IDX idx;
1428                 int work_to_do;
1429                 unsigned int data_len;
1430                 pending_ring_idx_t index;
1431
1432                 /* Get a netif from the list with work to do. */
1433                 vif = poll_net_schedule_list(netbk);
1434                 /* This can sometimes happen because the test of
1435                  * list_empty(net_schedule_list) at the top of the
1436                  * loop is unlocked.  Just go back and have another
1437                  * look.
1438                  */
1439                 if (!vif)
1440                         continue;
1441
1442                 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1443                     XEN_NETIF_TX_RING_SIZE) {
1444                         netdev_err(vif->dev,
1445                                    "Impossible number of requests. "
1446                                    "req_prod %d, req_cons %d, size %ld\n",
1447                                    vif->tx.sring->req_prod, vif->tx.req_cons,
1448                                    XEN_NETIF_TX_RING_SIZE);
1449                         netbk_fatal_tx_err(vif);
1450                         continue;
1451                 }
1452
1453                 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1454                 if (!work_to_do) {
1455                         xenvif_put(vif);
1456                         continue;
1457                 }
1458
1459                 idx = vif->tx.req_cons;
1460                 rmb(); /* Ensure that we see the request before we copy it. */
1461                 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1462
1463                 /* Credit-based scheduling. */
1464                 if (txreq.size > vif->remaining_credit &&
1465                     tx_credit_exceeded(vif, txreq.size)) {
1466                         xenvif_put(vif);
1467                         continue;
1468                 }
1469
1470                 vif->remaining_credit -= txreq.size;
1471
1472                 work_to_do--;
1473                 vif->tx.req_cons = ++idx;
1474
1475                 memset(extras, 0, sizeof(extras));
1476                 if (txreq.flags & XEN_NETTXF_extra_info) {
1477                         work_to_do = xen_netbk_get_extras(vif, extras,
1478                                                           work_to_do);
1479                         idx = vif->tx.req_cons;
1480                         if (unlikely(work_to_do < 0))
1481                                 continue;
1482                 }
1483
1484                 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1485                 if (unlikely(ret < 0))
1486                         continue;
1487
1488                 idx += ret;
1489
1490                 if (unlikely(txreq.size < ETH_HLEN)) {
1491                         netdev_dbg(vif->dev,
1492                                    "Bad packet size: %d\n", txreq.size);
1493                         netbk_tx_err(vif, &txreq, idx);
1494                         continue;
1495                 }
1496
1497                 /* No crossing a page as the payload mustn't fragment. */
1498                 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1499                         netdev_err(vif->dev,
1500                                    "txreq.offset: %x, size: %u, end: %lu\n",
1501                                    txreq.offset, txreq.size,
1502                                    (txreq.offset&~PAGE_MASK) + txreq.size);
1503                         netbk_fatal_tx_err(vif);
1504                         continue;
1505                 }
1506
1507                 index = pending_index(netbk->pending_cons);
1508                 pending_idx = netbk->pending_ring[index];
1509
1510                 data_len = (txreq.size > PKT_PROT_LEN &&
1511                             ret < XEN_NETIF_NR_SLOTS_MIN) ?
1512                         PKT_PROT_LEN : txreq.size;
1513
1514                 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1515                                 GFP_ATOMIC | __GFP_NOWARN);
1516                 if (unlikely(skb == NULL)) {
1517                         netdev_dbg(vif->dev,
1518                                    "Can't allocate a skb in start_xmit.\n");
1519                         netbk_tx_err(vif, &txreq, idx);
1520                         break;
1521                 }
1522
1523                 /* Packets passed to netif_rx() must have some headroom. */
1524                 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1525
1526                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1527                         struct xen_netif_extra_info *gso;
1528                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1529
1530                         if (netbk_set_skb_gso(vif, skb, gso)) {
1531                                 /* Failure in netbk_set_skb_gso is fatal. */
1532                                 kfree_skb(skb);
1533                                 continue;
1534                         }
1535                 }
1536
1537                 /* XXX could copy straight to head */
1538                 page = xen_netbk_alloc_page(netbk, pending_idx);
1539                 if (!page) {
1540                         kfree_skb(skb);
1541                         netbk_tx_err(vif, &txreq, idx);
1542                         continue;
1543                 }
1544
1545                 gop->source.u.ref = txreq.gref;
1546                 gop->source.domid = vif->domid;
1547                 gop->source.offset = txreq.offset;
1548
1549                 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1550                 gop->dest.domid = DOMID_SELF;
1551                 gop->dest.offset = txreq.offset;
1552
1553                 gop->len = txreq.size;
1554                 gop->flags = GNTCOPY_source_gref;
1555
1556                 gop++;
1557
1558                 memcpy(&netbk->pending_tx_info[pending_idx].req,
1559                        &txreq, sizeof(txreq));
1560                 netbk->pending_tx_info[pending_idx].vif = vif;
1561                 netbk->pending_tx_info[pending_idx].head = index;
1562                 *((u16 *)skb->data) = pending_idx;
1563
1564                 __skb_put(skb, data_len);
1565
1566                 skb_shinfo(skb)->nr_frags = ret;
1567                 if (data_len < txreq.size) {
1568                         skb_shinfo(skb)->nr_frags++;
1569                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1570                                              pending_idx);
1571                 } else {
1572                         frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1573                                              INVALID_PENDING_IDX);
1574                 }
1575
1576                 netbk->pending_cons++;
1577
1578                 request_gop = xen_netbk_get_requests(netbk, vif,
1579                                                      skb, txfrags, gop);
1580                 if (request_gop == NULL) {
1581                         kfree_skb(skb);
1582                         netbk_tx_err(vif, &txreq, idx);
1583                         continue;
1584                 }
1585                 gop = request_gop;
1586
1587                 __skb_queue_tail(&netbk->tx_queue, skb);
1588
1589                 vif->tx.req_cons = idx;
1590                 xen_netbk_check_rx_xenvif(vif);
1591
1592                 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1593                         break;
1594         }
1595
1596         return gop - netbk->tx_copy_ops;
1597 }
1598
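/*
 * Second half of the tx path: after the grant copies have been
 * performed, check each queued skb for copy errors, copy the linear
 * data into place, fill in fragments and checksum flags, and hand the
 * packet to the stack via xenvif_receive_skb().
 */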
1599 static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1600 {
1601         struct gnttab_copy *gop = netbk->tx_copy_ops;
1602         struct sk_buff *skb;
1603
1604         while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1605                 struct xen_netif_tx_request *txp;
1606                 struct xenvif *vif;
1607                 u16 pending_idx;
1608                 unsigned data_len;
1609
1610                 pending_idx = *((u16 *)skb->data);
1611                 vif = netbk->pending_tx_info[pending_idx].vif;
1612                 txp = &netbk->pending_tx_info[pending_idx].req;
1613
1614                 /* Check the remap error code. */
1615                 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1616                         netdev_dbg(vif->dev, "netback grant failed.\n");
1617                         skb_shinfo(skb)->nr_frags = 0;
1618                         kfree_skb(skb);
1619                         continue;
1620                 }
1621
1622                 data_len = skb->len;
1623                 memcpy(skb->data,
1624                        (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1625                        data_len);
1626                 if (data_len < txp->size) {
1627                         /* Append the packet payload as a fragment. */
1628                         txp->offset += data_len;
1629                         txp->size -= data_len;
1630                 } else {
1631                         /* Schedule a response immediately. */
1632                         xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1633                 }
1634
1635                 if (txp->flags & XEN_NETTXF_csum_blank)
1636                         skb->ip_summed = CHECKSUM_PARTIAL;
1637                 else if (txp->flags & XEN_NETTXF_data_validated)
1638                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639
1640                 xen_netbk_fill_frags(netbk, skb);
1641
1642                 /*
1643                  * If the initial fragment was < PKT_PROT_LEN then
1644                  * pull through some bytes from the other fragments to
1645                  * increase the linear region to PKT_PROT_LEN bytes.
1646                  */
1647                 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1648                         int target = min_t(int, skb->len, PKT_PROT_LEN);
1649                         __pskb_pull_tail(skb, target - skb_headlen(skb));
1650                 }
1651
1652                 skb->dev      = vif->dev;
1653                 skb->protocol = eth_type_trans(skb, skb->dev);
1654                 skb_reset_network_header(skb);
1655
1656                 if (checksum_setup(vif, skb)) {
1657                         netdev_dbg(vif->dev,
1658                                    "Can't set up checksum in net_tx_action\n");
1659                         kfree_skb(skb);
1660                         continue;
1661                 }
1662
1663                 skb_probe_transport_header(skb, 0);
1664
1665                 vif->dev->stats.rx_bytes += skb->len;
1666                 vif->dev->stats.rx_packets++;
1667
1668                 xenvif_receive_skb(vif, skb);
1669         }
1670 }
1671
1672 /* Called after netfront has transmitted */
1673 static void xen_netbk_tx_action(struct xen_netbk *netbk)
1674 {
1675         unsigned nr_gops;
1676
1677         nr_gops = xen_netbk_tx_build_gops(netbk);
1678
1679         if (nr_gops == 0)
1680                 return;
1681
1682         gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
1683
1684         xen_netbk_tx_submit(netbk);
1685 }
1686
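/*
 * Release a pending slot: issue a tx response for every coalesced
 * request belonging to this packet, return the ring entries to the
 * free pool, drop the vif reference taken for each slot and release
 * the backing page.
 */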
1687 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1688                                   u8 status)
1689 {
1690         struct xenvif *vif;
1691         struct pending_tx_info *pending_tx_info;
1692         pending_ring_idx_t head;
1693         u16 peek; /* peek into next tx request */
1694
1695         BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
1696
1697         /* Already complete? */
1698         if (netbk->mmap_pages[pending_idx] == NULL)
1699                 return;
1700
1701         pending_tx_info = &netbk->pending_tx_info[pending_idx];
1702
1703         vif = pending_tx_info->vif;
1704         head = pending_tx_info->head;
1705
1706         BUG_ON(!pending_tx_is_head(netbk, head));
1707         BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
1708
1709         do {
1710                 pending_ring_idx_t index;
1711                 pending_ring_idx_t idx = pending_index(head);
1712                 u16 info_idx = netbk->pending_ring[idx];
1713
1714                 pending_tx_info = &netbk->pending_tx_info[info_idx];
1715                 make_tx_response(vif, &pending_tx_info->req, status);
1716
1717                 /* Setting any number other than
1718                  * INVALID_PENDING_RING_IDX indicates this slot is
1719                  * starting a new packet / ending a previous packet.
1720                  */
1721                 pending_tx_info->head = 0;
1722
1723                 index = pending_index(netbk->pending_prod++);
1724                 netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1725
1726                 xenvif_put(vif);
1727
1728                 peek = netbk->pending_ring[pending_index(++head)];
1729
1730         } while (!pending_tx_is_head(netbk, peek));
1731
1732         netbk->mmap_pages[pending_idx]->mapping = 0;
1733         put_page(netbk->mmap_pages[pending_idx]);
1734         netbk->mmap_pages[pending_idx] = NULL;
1735 }
1736
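/*
 * Queue a tx response on the shared ring (plus a null response for any
 * extra-info slot) and notify the frontend if needed.
 */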
1738 static void make_tx_response(struct xenvif *vif,
1739                              struct xen_netif_tx_request *txp,
1740                              s8       st)
1741 {
1742         RING_IDX i = vif->tx.rsp_prod_pvt;
1743         struct xen_netif_tx_response *resp;
1744         int notify;
1745
1746         resp = RING_GET_RESPONSE(&vif->tx, i);
1747         resp->id     = txp->id;
1748         resp->status = st;
1749
1750         if (txp->flags & XEN_NETTXF_extra_info)
1751                 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1752
1753         vif->tx.rsp_prod_pvt = ++i;
1754         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1755         if (notify)
1756                 notify_remote_via_irq(vif->irq);
1757 }
1758
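/*
 * Fill in the next rx response on the shared ring; a negative status
 * overrides the size in the status field.
 */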
1759 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1760                                              u16      id,
1761                                              s8       st,
1762                                              u16      offset,
1763                                              u16      size,
1764                                              u16      flags)
1765 {
1766         RING_IDX i = vif->rx.rsp_prod_pvt;
1767         struct xen_netif_rx_response *resp;
1768
1769         resp = RING_GET_RESPONSE(&vif->rx, i);
1770         resp->offset     = offset;
1771         resp->flags      = flags;
1772         resp->id         = id;
1773         resp->status     = (s16)size;
1774         if (st < 0)
1775                 resp->status = (s16)st;
1776
1777         vif->rx.rsp_prod_pvt = ++i;
1778
1779         return resp;
1780 }
1781
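/* Any skbs queued for delivery to a frontend? */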
1782 static inline int rx_work_todo(struct xen_netbk *netbk)
1783 {
1784         return !skb_queue_empty(&netbk->rx_queue);
1785 }
1786
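/* Room for more pending requests and a vif waiting to be scheduled? */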
1787 static inline int tx_work_todo(struct xen_netbk *netbk)
1788 {
1790         if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
1791              < MAX_PENDING_REQS) &&
1792              !list_empty(&netbk->net_schedule_list))
1793                 return 1;
1794
1795         return 0;
1796 }
1797
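/*
 * Per-group worker thread: sleep until there is rx or tx work (or a
 * stop request), then run the corresponding action handlers.
 */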
1798 static int xen_netbk_kthread(void *data)
1799 {
1800         struct xen_netbk *netbk = data;
1801         while (!kthread_should_stop()) {
1802                 wait_event_interruptible(netbk->wq,
1803                                 rx_work_todo(netbk) ||
1804                                 tx_work_todo(netbk) ||
1805                                 kthread_should_stop());
1806                 cond_resched();
1807
1808                 if (kthread_should_stop())
1809                         break;
1810
1811                 if (rx_work_todo(netbk))
1812                         xen_netbk_rx_action(netbk);
1813
1814                 if (tx_work_todo(netbk))
1815                         xen_netbk_tx_action(netbk);
1816         }
1817
1818         return 0;
1819 }
1820
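/* Tear down the mappings of the frontend's tx and rx shared rings. */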
1821 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1822 {
1823         if (vif->tx.sring)
1824                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1825                                         vif->tx.sring);
1826         if (vif->rx.sring)
1827                 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1828                                         vif->rx.sring);
1829 }
1830
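/*
 * Map the frontend's tx and rx shared rings from the given grant
 * references and initialise the backend's view of them.
 */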
1831 int xen_netbk_map_frontend_rings(struct xenvif *vif,
1832                                  grant_ref_t tx_ring_ref,
1833                                  grant_ref_t rx_ring_ref)
1834 {
1835         void *addr;
1836         struct xen_netif_tx_sring *txs;
1837         struct xen_netif_rx_sring *rxs;
1838
1839         int err = -ENOMEM;
1840
1841         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1842                                      tx_ring_ref, &addr);
1843         if (err)
1844                 goto err;
1845
1846         txs = (struct xen_netif_tx_sring *)addr;
1847         BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1848
1849         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1850                                      rx_ring_ref, &addr);
1851         if (err)
1852                 goto err;
1853
1854         rxs = (struct xen_netif_rx_sring *)addr;
1855         BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1856
1857         vif->rx_req_cons_peek = 0;
1858
1859         return 0;
1860
1861 err:
1862         xen_netbk_unmap_frontend_rings(vif);
1863         return err;
1864 }
1865
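/*
 * Module init: clamp max_skb_slots to the protocol minimum, allocate
 * one xen_netbk group per online CPU, start a bound kthread for each
 * group and initialise the xenbus interface via xenvif_xenbus_init().
 */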
1866 static int __init netback_init(void)
1867 {
1868         int i;
1869         int rc = 0;
1870         int group;
1871
1872         if (!xen_domain())
1873                 return -ENODEV;
1874
1875         if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
1876                 printk(KERN_INFO
1877                        "xen-netback: max_skb_slots too small (%d), bumping it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
1878                        max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
1879                 max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
1880         }
1881
1882         xen_netbk_group_nr = num_online_cpus();
1883         xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1884         if (!xen_netbk)
1885                 return -ENOMEM;
1886
1887         for (group = 0; group < xen_netbk_group_nr; group++) {
1888                 struct xen_netbk *netbk = &xen_netbk[group];
1889                 skb_queue_head_init(&netbk->rx_queue);
1890                 skb_queue_head_init(&netbk->tx_queue);
1891
1892                 init_timer(&netbk->net_timer);
1893                 netbk->net_timer.data = (unsigned long)netbk;
1894                 netbk->net_timer.function = xen_netbk_alarm;
1895
1896                 netbk->pending_cons = 0;
1897                 netbk->pending_prod = MAX_PENDING_REQS;
1898                 for (i = 0; i < MAX_PENDING_REQS; i++)
1899                         netbk->pending_ring[i] = i;
1900
1901                 init_waitqueue_head(&netbk->wq);
1902                 netbk->task = kthread_create(xen_netbk_kthread,
1903                                              (void *)netbk,
1904                                              "netback/%u", group);
1905
1906                 if (IS_ERR(netbk->task)) {
1907                         printk(KERN_ALERT "kthread_create() failed at netback\n");
1908                         del_timer(&netbk->net_timer);
1909                         rc = PTR_ERR(netbk->task);
1910                         goto failed_init;
1911                 }
1912
1913                 kthread_bind(netbk->task, group);
1914
1915                 INIT_LIST_HEAD(&netbk->net_schedule_list);
1916
1917                 spin_lock_init(&netbk->net_schedule_list_lock);
1918
1919                 atomic_set(&netbk->netfront_count, 0);
1920
1921                 wake_up_process(netbk->task);
1922         }
1923
1924         rc = xenvif_xenbus_init();
1925         if (rc)
1926                 goto failed_init;
1927
1928         return 0;
1929
1930 failed_init:
1931         while (--group >= 0) {
1932                 struct xen_netbk *netbk = &xen_netbk[group];
1933                 for (i = 0; i < MAX_PENDING_REQS; i++) {
1934                         if (netbk->mmap_pages[i])
1935                                 __free_page(netbk->mmap_pages[i]);
1936                 }
1937                 del_timer(&netbk->net_timer);
1938                 kthread_stop(netbk->task);
1939         }
1940         vfree(xen_netbk);
1941         return rc;
1943 }
1944
1945 module_init(netback_init);
1946
1947 MODULE_LICENSE("Dual BSD/GPL");
1948 MODULE_ALIAS("xen-backend:vif");