/*
 *      IPv6 output functions
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on linux/net/ipv4/ip_output.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Changes:
 *      A.N.Kuznetsov   :       arithmetic in fragmentation.
 *                              extension headers are implemented.
 *                              route changes now work.
 *                              ip6_forward does not confuse sniffers.
 *                              etc.
 *
 *      H. von Brand    :       Added missing #include <linux/string.h>
 *      Imran Patel     :       frag id should be in NBO
 *      Kazunori MIYAZAWA @USAGI
 *                      :       add ip6_append_data and related functions
 *                              for datagram xmit
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>

static int ip6_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
        struct neighbour *neigh;
        struct in6_addr *nexthop;
        int ret;

        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;

        if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

                if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
                    ((mroute6_socket(dev_net(dev), skb) &&
                     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
                     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
                                         &ipv6_hdr(skb)->saddr))) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

                        /* Do not check for IFF_ALLMULTI; multicast routing
                         * is not supported in any case.
                         */
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);

                        if (ipv6_hdr(skb)->hop_limit == 0) {
                                IP6_INC_STATS(dev_net(dev), idev,
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }

                IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
                                skb->len);

                if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
                    IPV6_ADDR_SCOPE_NODELOCAL &&
                    !(dev->flags & IFF_LOOPBACK)) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        rcu_read_lock_bh();
        nexthop = rt6_nexthop((struct rt6_info *)dst);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
                ret = dst_neigh_output(dst, neigh, skb);
                rcu_read_unlock_bh();
                return ret;
        }
        rcu_read_unlock_bh();

        IP6_INC_STATS(dev_net(dst->dev),
                      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
}

static int ip6_finish_output(struct sk_buff *skb)
{
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
                return ip6_fragment(skb, ip6_finish_output2);
        else
                return ip6_finish_output2(skb);
}

int ip6_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
        if (unlikely(idev->cnf.disable_ipv6)) {
                IP6_INC_STATS(dev_net(dev), idev,
                              IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
                return 0;
        }

        return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip6_finish_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

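/*
 * The local transmit path through the functions above, roughly:
 *
 *      ip6_output()
 *        -> NF_HOOK_COND(NF_INET_POST_ROUTING)  (netfilter may steal the skb)
 *          -> ip6_finish_output()
 *             -> ip6_fragment(skb, ip6_finish_output2)  if fragmentation is needed
 *             -> ip6_finish_output2()                   otherwise
 *                -> neighbour lookup/create, then dst_neigh_output()
 */
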
/*
 *      xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass)
{
        struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        u32 mtu;

        if (opt) {
                unsigned int head_room;

                /* First: exthdrs may take lots of space (~8K for now);
                 * MAX_HEADER is not enough.
                 */
                head_room = opt->opt_nflen + opt->opt_flen;
                seg_len += head_room;
                head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

                if (skb_headroom(skb) < head_room) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
                        if (skb2 == NULL) {
                                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
                        consume_skb(skb);
                        skb = skb2;
                        skb_set_owner_w(skb, sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
        }

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);

        /*
         *      Fill in the IPv6 header
         */
        if (np)
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;

        hdr->saddr = fl6->saddr;
        hdr->daddr = *first_hop;

        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
                               dst->dev, dst_output);
        }

        skb->dev = dst->dev;
        ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
}
EXPORT_SYMBOL(ip6_xmit);

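/*
 * A minimal sketch of how a connection-oriented transport calls ip6_xmit()
 * (hypothetical caller; the real TCP path lives in net/ipv6/tcp_ipv6.c):
 *
 *      struct flowi6 fl6 = { .flowi6_proto = IPPROTO_TCP };
 *      fl6.daddr = remote_addr;        // assumed in6_addr values
 *      fl6.saddr = local_addr;
 *      skb_dst_set(skb, dst);          // route obtained from a prior lookup
 *      err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 *
 * The caller supplies the route (dst) on the skb; ip6_xmit() only builds
 * the IPv6 header and runs the LOCAL_OUT hook.
 */
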
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
        struct ip6_ra_chain *ra;
        struct sock *last = NULL;

        read_lock(&ip6_ra_lock);
        for (ra = ip6_ra_chain; ra; ra = ra->next) {
                struct sock *sk = ra->sk;
                if (sk && ra->sel == sel &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == skb->dev->ifindex)) {
                        if (last) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        rawv6_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                rawv6_rcv(last, skb);
                read_unlock(&ip6_ra_lock);
                return 1;
        }
        read_unlock(&ip6_ra_lock);
        return 0;
}

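/*
 * Note the delivery pattern above: every matching socket except the last
 * receives a clone, while the final match consumes the original skb.
 * With N matches this costs N-1 clones instead of N, and returning 1
 * tells the caller the packet has been taken.
 */
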
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        u8 nexthdr = hdr->nexthdr;
        __be16 frag_off;
        int offset;

        if (ipv6_ext_hdr(nexthdr)) {
                offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
                if (offset < 0)
                        return 0;
        } else
                offset = sizeof(struct ipv6hdr);

        if (nexthdr == IPPROTO_ICMPV6) {
                struct icmp6hdr *icmp6;

                if (!pskb_may_pull(skb, (skb_network_header(skb) +
                                         offset + 1 - skb->data)))
                        return 0;

                icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

                switch (icmp6->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
                case NDISC_NEIGHBOUR_SOLICITATION:
                case NDISC_NEIGHBOUR_ADVERTISEMENT:
                case NDISC_REDIRECT:
                        /* Unicast neighbour discovery messages destined
                         * to the proxied address must be passed to the
                         * input function.
                         */
                        return 1;
                default:
                        break;
                }
        }

        /*
         * The proxying router can't forward traffic sent to a link-local
         * address, so signal the sender and discard the packet. This
         * behavior is clarified by the MIPv6 specification.
         */
        if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
                dst_link_failure(skb);
                return -1;
        }

        return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
        return dst_output(skb);
}

int ip6_forward(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct net *net = dev_net(dst->dev);
        u32 mtu;

        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;

        if (skb_warn_if_lro(skb))
                goto drop;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        if (skb->pkt_type != PACKET_HOST)
                goto drop;

        skb_forward_csum(skb);

        /*
         *      We do not process router alert (RA) packets;
         *      we push them to user level AS IS, with no guarantee
         *      that the application will be able to interpret them.
         *      The reason is that we cannot do anything clever here.
         *
         *      We are not the end node, so if the packet contains
         *      AH/ESP we cannot do anything with it. Defragmentation
         *      would also be a mistake; RA packets cannot be
         *      fragmented, because there is no guarantee that
         *      different fragments will follow one path. --ANK
         */
        if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
                if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
                        return 0;
        }

        /*
         *      check and decrement ttl
         */
        if (hdr->hop_limit <= 1) {
                /* Force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

                kfree_skb(skb);
                return -ETIMEDOUT;
        }

        /* XXX: idev->cnf.proxy_ndp? */
        if (net->ipv6.devconf_all->proxy_ndp &&
            pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
                int proxied = ip6_forward_proxy_check(skb);
                if (proxied > 0)
                        return ip6_input(skb);
                else if (proxied < 0) {
                        IP6_INC_STATS(net, ip6_dst_idev(dst),
                                      IPSTATS_MIB_INDISCARDS);
                        goto drop;
                }
        }

        if (!xfrm6_route_forward(skb)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
        dst = skb_dst(skb);

        /* IPv6 specs say nothing about it, but it is clear that we cannot
         * send redirects to source routed frames.
         * We don't send redirects to frames decapsulated from IPsec.
         */
        if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct inet_peer *peer;
                struct rt6_info *rt;

                /*
                 *      incoming and outgoing devices are the same:
                 *      send a redirect.
                 */

                rt = (struct rt6_info *) dst;
                if (rt->rt6i_flags & RTF_GATEWAY)
                        target = &rt->rt6i_gateway;
                else
                        target = &hdr->daddr;

                peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

                /* Limit redirects both by destination (here)
                 * and by source (inside ndisc_send_redirect)
                 */
                if (inet_peer_xrlim_allow(peer, 1*HZ))
                        ndisc_send_redirect(skb, target);
                if (peer)
                        inet_putpeer(peer);
        } else {
                int addrtype = ipv6_addr_type(&hdr->saddr);

                /* This check is security critical. */
                if (addrtype == IPV6_ADDR_ANY ||
                    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
                        goto error;
                if (addrtype & IPV6_ADDR_LINKLOCAL) {
                        icmpv6_send(skb, ICMPV6_DEST_UNREACH,
                                    ICMPV6_NOT_NEIGHBOUR, 0);
                        goto error;
                }
        }

        mtu = dst_mtu(dst);
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;

        if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
            (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
                /* Again, force OUTPUT device used as source address */
                skb->dev = dst->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
                IP6_INC_STATS_BH(net,
                                 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (skb_cow(skb, dst->dev->hard_header_len)) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
                goto drop;
        }

        hdr = ipv6_hdr(skb);

        /* Mangling hops number delayed to point after skb COW */

        hdr->hop_limit--;

        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
        return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                       ip6_forward_finish);

error:
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
        kfree_skb(skb);
        return -EINVAL;
}

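/*
 * Error handling in ip6_forward(), in summary: an expiring hop limit
 * yields ICMPV6_TIME_EXCEED, an oversized packet yields ICMPV6_PKT_TOOBIG
 * (IPv6 routers never fragment in transit), and a link-local source
 * yields ICMPV6_DEST_UNREACH / ICMPV6_NOT_NEIGHBOUR; everything else is
 * dropped silently with the matching SNMP counter bumped.
 */
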
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_set(to, dst_clone(skb_dst(from)));
        to->dev = from->dev;
        to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        to->nf_trace = from->nf_trace;
#endif
        skb_copy_secmark(to, from);
}

int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
        int hroom, troom;
        __be32 frag_id = 0;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;
        struct net *net = dev_net(skb_dst(skb)->dev);

        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;

        mtu = ip6_skb_dst_mtu(skb);

        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb is not generated by a local socket.
         */
        if (unlikely(!skb->local_df && skb->len > mtu) ||
                     (IP6CB(skb)->frag_max_size &&
                      IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
                        sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

                skb->dev = skb_dst(skb)->dev;
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        if (np && np->frag_size < mtu) {
                if (np->frag_size)
                        mtu = np->frag_size;
        }
        mtu -= hlen + sizeof(struct frag_hdr);

        if (skb_has_frag_list(skb)) {
                int first_len = skb_pagelen(skb);
                struct sk_buff *frag2;

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                /* BUILD HEADER */

                *prevhdr = NEXTHDR_FRAGMENT;
                tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
                }

                __skb_pull(skb, hlen);
                fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
                __skb_push(skb, hlen);
                skb_reset_network_header(skb);
                memcpy(skb_network_header(skb), tmp_hdr, hlen);

                ipv6_select_ident(fh, rt);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
                frag_id = fh->identification;

                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                ipv6_hdr(skb)->payload_len = htons(first_len -
                                                   sizeof(struct ipv6hdr));

                dst_hold(&rt->dst);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), tmp_hdr,
                                       hlen);
                                offset += skb->len - hlen - sizeof(struct frag_hdr);
                                fh->nexthdr = nexthdr;
                                fh->reserved = 0;
                                fh->frag_off = htons(offset);
                                if (frag->next != NULL)
                                        fh->frag_off |= htons(IP6_MF);
                                fh->identification = frag_id;
                                ipv6_hdr(frag)->payload_len =
                                                htons(frag->len -
                                                      sizeof(struct ipv6hdr));
                                ip6_copy_metadata(frag, skb);
                        }

                        err = output(skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                              IPSTATS_MIB_FRAGCREATES);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                kfree(tmp_hdr);

                if (err == 0) {
                        IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                      IPSTATS_MIB_FRAGOKS);
                        ip6_rt_put(rt);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }

                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                              IPSTATS_MIB_FRAGFAILS);
                ip6_rt_put(rt);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
            skb_checksum_help(skb))
                goto fail;

        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;                     /* Where to start from */

        /*
         *      Fragment the datagram.
         */

        *prevhdr = NEXTHDR_FRAGMENT;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        troom = rt->dst.dev->needed_tailroom;

        /*
         *      Keep copying data until we run out.
         */
        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight-byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
                                      hroom + troom, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip6_copy_metadata(frag, skb);
                skb_reserve(frag, hroom);
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                skb_reset_network_header(frag);
                fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
                frag->transport_header = (frag->network_header + hlen +
                                          sizeof(struct frag_hdr));

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */
                if (skb->sk)
                        skb_set_owner_w(frag, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */
                skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

                /*
                 *      Build fragment header.
                 */
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                if (!frag_id) {
                        ipv6_select_ident(fh, rt);
                        frag_id = fh->identification;
                } else
                        fh->identification = frag_id;

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
                        BUG();
                left -= len;

                fh->frag_off = htons(offset);
                if (left > 0)
                        fh->frag_off |= htons(IP6_MF);
                ipv6_hdr(frag)->payload_len = htons(frag->len -
                                                    sizeof(struct ipv6hdr));

                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                err = output(frag);
                if (err)
                        goto fail;

                IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_FRAGCREATES);
        }
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_FRAGOKS);
        consume_skb(skb);
        return err;

fail:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return err;
}

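/*
 * Worked example for the slow path above, assuming a path MTU of 1500
 * and an unfragmentable part (bare IPv6 header, no extension headers)
 * of hlen = 40 bytes:
 *
 *      mtu = 1500 - 40 - sizeof(struct frag_hdr)  = 1452 bytes of payload
 *      non-final fragments: len &= ~7             -> 1448 bytes each
 *      on-wire fragment size: 40 + 8 + 1448       = 1496 bytes
 *
 * Only the final fragment may carry a payload that is not a multiple of
 * eight bytes.
 */
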
static inline int ip6_rt_check(const struct rt6key *rt_key,
                               const struct in6_addr *fl_addr,
                               const struct in6_addr *addr_cache)
{
        return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
                (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}

static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
                                          struct dst_entry *dst,
                                          const struct flowi6 *fl6)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct rt6_info *rt;

        if (!dst)
                goto out;

        if (dst->ops->family != AF_INET6) {
                dst_release(dst);
                return NULL;
        }

        rt = (struct rt6_info *)dst;
        /* Yes, checking route validity in the unconnected
         * case is not very simple. Take into account
         * that we do not support routing by source, TOS,
         * and MSG_DONTROUTE            --ANK (980726)
         *
         * 1. ip6_rt_check(): If the route was a host route,
         *    check that the cached destination is current.
         *    If it is a network route, we still may
         *    check its validity using the saved pointer
         *    to the last used address: daddr_cache.
         *    We do not want to save the whole address now
         *    (because the main consumer of this service
         *    is tcp, which does not have this problem),
         *    so this last trick works only on connected
         *    sockets.
         * 2. oif also should be the same.
         */
        if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
            (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
                dst_release(dst);
                dst = NULL;
        }

out:
        return dst;
}

static int ip6_dst_lookup_tail(struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
{
        struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        struct neighbour *n;
        struct rt6_info *rt;
#endif
        int err;

        if (*dst == NULL)
                *dst = ip6_route_output(net, sk, fl6);

        if ((err = (*dst)->error))
                goto out_err_release;

        if (ipv6_addr_any(&fl6->saddr)) {
                struct rt6_info *rt = (struct rt6_info *) *dst;
                err = ip6_route_get_saddr(net, rt, &fl6->daddr,
                                          sk ? inet6_sk(sk)->srcprefs : 0,
                                          &fl6->saddr);
                if (err)
                        goto out_err_release;
        }

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        /*
         * If the dst entry we've looked up has a neighbour entry
         * that is in the INCOMPLETE state and the source address
         * from the flow is marked as OPTIMISTIC, we release the
         * found dst entry and replace it with the dst entry of
         * the nexthop router.
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
        n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();

        if (err) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
                int redirect;

                ifp = ipv6_get_ifaddr(net, &fl6->saddr,
                                      (*dst)->dev, 1);

                redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
                if (ifp)
                        in6_ifa_put(ifp);

                if (redirect) {
                        /*
                         * We need to get the dst entry for the
                         * default router instead
                         */
                        dst_release(*dst);
                        memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
                        memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
                        *dst = ip6_route_output(net, sk, &fl_gw6);
                        if ((err = (*dst)->error))
                                goto out_err_release;
                }
        }
#endif

        return 0;

out_err_release:
        if (err == -ENETUNREACH)
                IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
        dst_release(*dst);
        *dst = NULL;
        return err;
}

/**
 *      ip6_dst_lookup - perform route lookup on flow
 *      @sk: socket which provides route info
 *      @dst: pointer to dst_entry * for result
 *      @fl6: flow to lookup
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
{
        *dst = NULL;
        return ip6_dst_lookup_tail(sk, dst, fl6);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);

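/*
 * A minimal sketch of a caller, assuming an already-populated flow key
 * (hypothetical code, not from this file):
 *
 *      struct flowi6 fl6 = {
 *              .flowi6_proto = IPPROTO_UDP,
 *              .daddr = some_destination,      // assumed in6_addr
 *      };
 *      struct dst_entry *dst;
 *      int err = ip6_dst_lookup(sk, &dst, &fl6);
 *      if (err)
 *              return err;
 *      ... use dst, then dst_release(dst) ...
 *
 * On success the caller owns a reference on *dst and must release it.
 */
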
/**
 *      ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 *      @sk: socket which provides route info
 *      @fl6: flow to lookup
 *      @final_dst: final destination address for ipsec lookup
 *      @can_sleep: we are in a sleepable context
 *
 *      This function performs a route lookup on the given flow.
 *
 *      It returns a valid dst pointer on success, or a pointer encoded
 *      error code.
 */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst,
                                      bool can_sleep)
{
        struct dst_entry *dst = NULL;
        int err;

        err = ip6_dst_lookup_tail(sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

        return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);

/**
 *      ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
 *      @sk: socket which provides the dst cache and route info
 *      @fl6: flow to lookup
 *      @final_dst: final destination address for ipsec lookup
 *      @can_sleep: we are in a sleepable context
 *
 *      This function performs a route lookup on the given flow with the
 *      possibility of using the cached route in the socket if it is valid.
 *      It will take the socket dst lock when operating on the dst cache.
 *      As a result, this function can only be used in process context.
 *
 *      It returns a valid dst pointer on success, or a pointer encoded
 *      error code.
 */
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst,
                                         bool can_sleep)
{
        struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
        int err;

        dst = ip6_sk_dst_check(sk, dst, fl6);

        err = ip6_dst_lookup_tail(sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;

        return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

static inline int ip6_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags,
                        struct rt6_info *rt)
{
        struct sk_buff *skb;
        struct frag_hdr fhdr;
        int err;

        /* The network device supports UDP large send offload,
         * so create one single skb packet containing the complete
         * udp datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->protocol = htons(ETH_P_IPV6);
                skb->csum = 0;

                __skb_queue_tail(&sk->sk_write_queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
        }

        skb->ip_summed = CHECKSUM_PARTIAL;
        /* Specify the length of each IPv6 datagram fragment.
         * It has to be a multiple of 8.
         */
        skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
                                     sizeof(struct frag_hdr)) & ~7;
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        ipv6_select_ident(&fhdr, rt);
        skb_shinfo(skb)->ip6_frag_id = fhdr.identification;

append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

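/*
 * Example of the gso_size computation above: with mtu = 1500 and
 * fragheaderlen = 40 (bare IPv6 header),
 *
 *      gso_size = (1500 - 40 - 8) & ~7 = 1452 & ~7 = 1448
 *
 * so the device will later segment the single UFO skb into fragments
 * carrying 1448 payload bytes each, all sharing ip6_frag_id.
 */
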
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
                                               gfp_t gfp)
{
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
                                                gfp_t gfp)
{
        return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

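/*
 * The (src->hdrlen + 1) * 8 above follows the extension header length
 * encoding from RFC 2460: hdrlen counts 8-octet units beyond the first
 * 8 octets, so hdrlen == 0 means an 8-byte header, hdrlen == 2 a
 * 24-byte one.
 */
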
static void ip6_append_data_mtu(unsigned int *mtu,
                                int *maxfraglen,
                                unsigned int fragheaderlen,
                                struct sk_buff *skb,
                                struct rt6_info *rt,
                                bool pmtuprobe)
{
        if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
                if (skb == NULL) {
                        /* first fragment, reserve header_len */
                        *mtu = *mtu - rt->dst.header_len;

                } else {
                        /*
                         * this fragment is not the first, so the header
                         * space is regarded as data space.
                         */
                        *mtu = min(*mtu, pmtuprobe ?
                                   rt->dst.dev->mtu :
                                   dst_mtu(rt->dst.path));
                }
                *maxfraglen = ((*mtu - fragheaderlen) & ~7)
                              + fragheaderlen - sizeof(struct frag_hdr);
        }
}

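/*
 * Example of the maxfraglen computation above: with *mtu = 1500 and
 * fragheaderlen = 40,
 *
 *      maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1456 + 32 = 1488
 *
 * i.e. the largest 8-byte-aligned fragment, measured from the start of
 * the IPv6 header, leaving room for the 8-byte fragment header.
 */
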
1099 int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1100         int offset, int len, int odd, struct sk_buff *skb),
1101         void *from, int length, int transhdrlen,
1102         int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1103         struct rt6_info *rt, unsigned int flags, int dontfrag)
1104 {
1105         struct inet_sock *inet = inet_sk(sk);
1106         struct ipv6_pinfo *np = inet6_sk(sk);
1107         struct inet_cork *cork;
1108         struct sk_buff *skb, *skb_prev = NULL;
1109         unsigned int maxfraglen, fragheaderlen, mtu;
1110         int exthdrlen;
1111         int dst_exthdrlen;
1112         int hh_len;
1113         int copy;
1114         int err;
1115         int offset = 0;
1116         __u8 tx_flags = 0;
1117
1118         if (flags&MSG_PROBE)
1119                 return 0;
1120         cork = &inet->cork.base;
1121         if (skb_queue_empty(&sk->sk_write_queue)) {
1122                 /*
1123                  * setup for corking
1124                  */
1125                 if (opt) {
1126                         if (WARN_ON(np->cork.opt))
1127                                 return -EINVAL;
1128
1129                         np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1130                         if (unlikely(np->cork.opt == NULL))
1131                                 return -ENOBUFS;
1132
1133                         np->cork.opt->tot_len = opt->tot_len;
1134                         np->cork.opt->opt_flen = opt->opt_flen;
1135                         np->cork.opt->opt_nflen = opt->opt_nflen;
1136
1137                         np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1138                                                             sk->sk_allocation);
1139                         if (opt->dst0opt && !np->cork.opt->dst0opt)
1140                                 return -ENOBUFS;
1141
1142                         np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1143                                                             sk->sk_allocation);
1144                         if (opt->dst1opt && !np->cork.opt->dst1opt)
1145                                 return -ENOBUFS;
1146
1147                         np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1148                                                            sk->sk_allocation);
1149                         if (opt->hopopt && !np->cork.opt->hopopt)
1150                                 return -ENOBUFS;
1151
1152                         np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1153                                                             sk->sk_allocation);
1154                         if (opt->srcrt && !np->cork.opt->srcrt)
1155                                 return -ENOBUFS;
1156
1157                         /* need source address above miyazawa*/
1158                 }
1159                 dst_hold(&rt->dst);
1160                 cork->dst = &rt->dst;
1161                 inet->cork.fl.u.ip6 = *fl6;
1162                 np->cork.hop_limit = hlimit;
1163                 np->cork.tclass = tclass;
1164                 if (rt->dst.flags & DST_XFRM_TUNNEL)
1165                         mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1166                               rt->dst.dev->mtu : dst_mtu(&rt->dst);
1167                 else
1168                         mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1169                               rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1170                 if (np->frag_size < mtu) {
1171                         if (np->frag_size)
1172                                 mtu = np->frag_size;
1173                 }
1174                 cork->fragsize = mtu;
1175                 if (dst_allfrag(rt->dst.path))
1176                         cork->flags |= IPCORK_ALLFRAG;
1177                 cork->length = 0;
1178                 exthdrlen = (opt ? opt->opt_flen : 0);
1179                 length += exthdrlen;
1180                 transhdrlen += exthdrlen;
1181                 dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1182         } else {
1183                 rt = (struct rt6_info *)cork->dst;
1184                 fl6 = &inet->cork.fl.u.ip6;
1185                 opt = np->cork.opt;
1186                 transhdrlen = 0;
1187                 exthdrlen = 0;
1188                 dst_exthdrlen = 0;
1189                 mtu = cork->fragsize;
1190         }
1191
1192         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1193
1194         fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1195                         (opt ? opt->opt_nflen : 0);
1196         maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1197                      sizeof(struct frag_hdr);
1198
1199         if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1200                 unsigned int maxnonfragsize, headersize;
1201
1202                 headersize = sizeof(struct ipv6hdr) +
1203                              (opt ? opt->tot_len : 0) +
1204                              (dst_allfrag(&rt->dst) ?
1205                               sizeof(struct frag_hdr) : 0) +
1206                              rt->rt6i_nfheader_len;
1207
1208                 maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
1209                                  mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1210
1211                 /* dontfrag active */
1212                 if ((cork->length + length > mtu - headersize) && dontfrag &&
1213                     (sk->sk_protocol == IPPROTO_UDP ||
1214                      sk->sk_protocol == IPPROTO_RAW)) {
1215                         ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1216                                                    sizeof(struct ipv6hdr));
1217                         goto emsgsize;
1218                 }
1219
1220                 if (cork->length + length > maxnonfragsize - headersize) {
1221 emsgsize:
1222                         ipv6_local_error(sk, EMSGSIZE, fl6,
1223                                          mtu - headersize +
1224                                          sizeof(struct ipv6hdr));
1225                         return -EMSGSIZE;
1226                 }
1227         }
1228
1229         /* For UDP, check if TX timestamp is enabled */
1230         if (sk->sk_type == SOCK_DGRAM)
1231                 sock_tx_timestamp(sk, &tx_flags);
1232
1233         /*
1234          * Let's try using as much space as possible.
1235          * Use MTU if total length of the message fits into the MTU.
1236          * Otherwise, we need to reserve fragment header and
1237          * fragment alignment (= 8-15 octects, in total).
1238          *
1239          * Note that we may need to "move" the data from the tail of
1240          * of the buffer to the new fragment when we split
1241          * the message.
1242          *
1243          * FIXME: It may be fragmented into multiple chunks
1244          *        at once if non-fragmentable extension headers
1245          *        are too large.
1246          * --yoshfuji
1247          */
1248
1249         skb = skb_peek_tail(&sk->sk_write_queue);
1250         cork->length += length;
1251         if (((length > mtu) ||
1252              (skb && skb_is_gso(skb))) &&
1253             (sk->sk_protocol == IPPROTO_UDP) &&
1254             (rt->dst.dev->features & NETIF_F_UFO)) {
1255                 err = ip6_ufo_append_data(sk, getfrag, from, length,
1256                                           hh_len, fragheaderlen,
1257                                           transhdrlen, mtu, flags, rt);
1258                 if (err)
1259                         goto error;
1260                 return 0;
1261         }
1262
1263         if (!skb)
1264                 goto alloc_new_skb;
1265
1266         while (length > 0) {
1267                 /* Check if the remaining data fits into current packet. */
1268                 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1269                 if (copy < length)
1270                         copy = maxfraglen - skb->len;
1271
1272                 if (copy <= 0) {
1273                         char *data;
1274                         unsigned int datalen;
1275                         unsigned int fraglen;
1276                         unsigned int fraggap;
1277                         unsigned int alloclen;
1278 alloc_new_skb:
1279                         /* There's no room in the current skb */
1280                         if (skb)
1281                                 fraggap = skb->len - maxfraglen;
1282                         else
1283                                 fraggap = 0;
1284                         /* update mtu and maxfraglen if necessary */
1285                         if (skb == NULL || skb_prev == NULL)
1286                                 ip6_append_data_mtu(&mtu, &maxfraglen,
1287                                                     fragheaderlen, skb, rt,
1288                                                     np->pmtudisc ==
1289                                                     IPV6_PMTUDISC_PROBE);
1290
1291                         skb_prev = skb;
1292
1293                         /*
1294                          * If remaining data exceeds the mtu,
1295                          * we know we need more fragment(s).
1296                          */
1297                         datalen = length + fraggap;
1298
1299                         if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1300                                 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1301                         if ((flags & MSG_MORE) &&
1302                             !(rt->dst.dev->features&NETIF_F_SG))
1303                                 alloclen = mtu;
1304                         else
1305                                 alloclen = datalen + fragheaderlen;
1306
1307                         alloclen += dst_exthdrlen;
1308
1309                         if (datalen != length + fraggap) {
1310                                 /*
1311                                  * this is not the last fragment, the trailer
1312                                  * space is regarded as data space.
1313                                  */
1314                                 datalen += rt->dst.trailer_len;
1315                         }
1316
1317                         alloclen += rt->dst.trailer_len;
1318                         fraglen = datalen + fragheaderlen;
1319
1320                         /*
1321                          * We just reserve space for fragment header.
1322                          * Note: this may be overallocation if the message
1323                          * (without MSG_MORE) fits into the MTU.
1324                          */
1325                         alloclen += sizeof(struct frag_hdr);
1326
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else {
                                        /* Only the initial fragment
                                         * is timestamped.
                                         */
                                        tx_flags = 0;
                                }
                        }
                        if (skb == NULL)
                                goto error;
                        /*
                         *      Fill in the control structures.
                         */
                        skb->protocol = htons(ETH_P_IPV6);
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        /* Reserve room for the hardware header, the fragment
                         * header and any IPsec headers.
                         */
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
                                    dst_exthdrlen);

                        if (sk->sk_type == SOCK_DGRAM)
                                skb_shinfo(skb)->tx_flags = tx_flags;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        data += fragheaderlen;
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
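
                        /*
                         * fraggap is the data that overflowed past maxfraglen
                         * in the previous skb; move it into this fragment and
                         * trim the predecessor back, keeping both checksums
                         * consistent.
                         */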
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }
                        copy = datalen - transhdrlen - fraggap;

                        if (copy < 0) {
                                err = -EINVAL;
                                kfree_skb(skb);
                                goto error;
                        } else if (copy > 0 &&
                                   getfrag(from, data + transhdrlen,
                                           offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

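                        /*
                         * The transport-header and extension-header
                         * reservations apply to the first fragment only,
                         * so clear them once that fragment has been built.
                         */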
                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        dst_exthdrlen = 0;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

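                /*
                 * There is room in the current tail skb: append up to copy
                 * bytes, either into its linear area (device cannot do
                 * scatter/gather) or into the per-socket page fragment,
                 * coalescing with the last frag when possible.
                 */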
                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features & NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                    offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        struct page_frag *pfrag = sk_page_frag(sk);

                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
                                goto error;

                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
                                err = -EMSGSIZE;
                                if (i == MAX_SKB_FRAGS)
                                        goto error;

                                __skb_fill_page_desc(skb, i, pfrag->page,
                                                     pfrag->offset, 0);
                                skb_shinfo(skb)->nr_frags = ++i;
                                get_page(pfrag->page);
                        }
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
                                    page_address(pfrag->page) + pfrag->offset,
                                    offset, copy, skb->len, skb) < 0)
                                goto error_efault;

                        pfrag->offset += copy;
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error_efault:
        err = -EFAULT;
error:
        cork->length -= length;
        IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
        return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);
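
/*
 * Editor's sketch, not part of the original file: a minimal getfrag()
 * callback of the shape ip6_append_data() expects, modelled on the
 * in-tree icmpv6_getfrag().  It copies len bytes starting at offset
 * within a flat kernel buffer and folds them into the skb's running
 * checksum; the "example_" name is hypothetical, and the block is kept
 * under #if 0 so it is illustration only.
 */
#if 0
static int example_getfrag(void *from, char *to, int offset, int len,
                           int odd, struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_NONE) {
                /* Copy and checksum in one pass, then fold the partial
                 * sum into skb->csum at the correct (odd/even) offset.
                 */
                __wsum csum;

                csum = csum_partial_copy_nocheck((char *)from + offset,
                                                 to, len, 0);
                skb->csum = csum_block_add(skb->csum, csum, odd);
        } else {
                memcpy(to, (char *)from + offset, len);
        }
        return 0;
}
#endif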

static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
        if (np->cork.opt) {
                kfree(np->cork.opt->dst0opt);
                kfree(np->cork.opt->dst1opt);
                kfree(np->cork.opt->hopopt);
                kfree(np->cork.opt->srcrt);
                kfree(np->cork.opt);
                np->cork.opt = NULL;
        }

        if (inet->cork.base.dst) {
                dst_release(inet->cork.base.dst);
                inet->cork.base.dst = NULL;
                inet->cork.base.flags &= ~IPCORK_ALLFRAG;
        }
        memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

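/*
 * Splice the socket's pending write queue into a single packet, prepend
 * the corked extension headers and the IPv6 header, and hand the result
 * to ip6_local_out().  The cork state is released on both the success
 * and the error path.
 */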
int ip6_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
        struct ipv6hdr *hdr;
        struct ipv6_txoptions *opt = np->cork.opt;
        struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
        struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
        unsigned char proto = fl6->flowi6_proto;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* Move skb->data back from the extension header to the IP header. */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
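
        /*
         * Chain every remaining queued skb onto the first skb's frag_list
         * so the whole pending queue leaves as one packet; ownership moves
         * to the head skb, so destructor and sk are cleared on each.
         */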
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Allow local fragmentation. */
        if (np->pmtudisc < IPV6_PMTUDISC_DO)
                skb->local_df = 1;

        *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
        if (opt && opt->opt_nflen)
                ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);

        ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
        hdr->daddr = *final_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        skb_dst_set(skb, dst_clone(&rt->dst));
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        if (proto == IPPROTO_ICMPV6) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

                ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
                ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
        }

        err = ip6_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        goto error;
        }

out:
        ip6_cork_release(inet, np);
        return err;
error:
        IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

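/*
 * Throw away everything queued by ip6_append_data() without sending it,
 * counting each dropped buffer that already has a route attached as an
 * output discard, then release the cork state.
 */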
void ip6_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
                if (skb_dst(skb))
                        IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
        }

        ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
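
/*
 * Editor's sketch, not part of the original file: the corked-send
 * pattern that the three exported functions above are built for, in
 * the shape used by callers such as udpv6_sendmsg() and rawv6_sendmsg().
 * The name example_send() and the reuse of example_getfrag() from the
 * earlier sketch are hypothetical, and the ip6_append_data() argument
 * order assumes the twelve-argument prototype this era of the tree
 * declares in include/net/ipv6.h.  Kept under #if 0 as illustration.
 */
#if 0
static int example_send(struct sock *sk, void *data, size_t len,
                        struct flowi6 *fl6, struct rt6_info *rt,
                        int hlimit, int tclass)
{
        int err;

        lock_sock(sk);
        err = ip6_append_data(sk, example_getfrag, data, len,
                              0,        /* transhdrlen: no transport header */
                              hlimit, tclass,
                              NULL,     /* no IPv6 tx options */
                              fl6, rt,
                              0,        /* flags: no MSG_MORE, may sleep */
                              0);       /* dontfrag off */
        if (err)
                ip6_flush_pending_frames(sk);   /* drop the partial queue */
        else
                err = ip6_push_pending_frames(sk); /* add headers, transmit */
        release_sock(sk);
        return err;
}
#endif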