]> Pileus Git - ~andy/linux/blob - net/ipv4/ip_gre.c
b8bada00d516e134c3856351d993d22819f2d373
[~andy/linux] / net / ipv4 / ip_gre.c
1 /*
2  *      Linux NET3:     GRE over IP protocol decoder.
3  *
4  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/capability.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <asm/uaccess.h>
21 #include <linux/skbuff.h>
22 #include <linux/netdevice.h>
23 #include <linux/in.h>
24 #include <linux/tcp.h>
25 #include <linux/udp.h>
26 #include <linux/if_arp.h>
27 #include <linux/mroute.h>
28 #include <linux/init.h>
29 #include <linux/in6.h>
30 #include <linux/inetdevice.h>
31 #include <linux/igmp.h>
32 #include <linux/netfilter_ipv4.h>
33 #include <linux/etherdevice.h>
34 #include <linux/if_ether.h>
35
36 #include <net/sock.h>
37 #include <net/ip.h>
38 #include <net/icmp.h>
39 #include <net/protocol.h>
40 #include <net/ipip.h>
41 #include <net/arp.h>
42 #include <net/checksum.h>
43 #include <net/dsfield.h>
44 #include <net/inet_ecn.h>
45 #include <net/xfrm.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/rtnetlink.h>
49 #include <net/gre.h>
50
51 #if IS_ENABLED(CONFIG_IPV6)
52 #include <net/ipv6.h>
53 #include <net/ip6_fib.h>
54 #include <net/ip6_route.h>
55 #endif
56
57 /*
58    Problems & solutions
59    --------------------
60
61    1. The most important issue is detecting local dead loops.
62    They would cause complete host lockup in transmit, which
63    would be "resolved" by stack overflow or, if queueing is enabled,
64    with infinite looping in net_bh.
65
66    We cannot track such dead loops during route installation,
67    it is infeasible task. The most general solutions would be
68    to keep skb->encapsulation counter (sort of local ttl),
69    and silently drop packet when it expires. It is a good
70    solution, but it supposes maintaining new variable in ALL
71    skb, even if no tunneling is used.
72
73    Current solution: xmit_recursion breaks dead loops. This is a percpu
74    counter, since when we enter the first ndo_xmit(), cpu migration is
75    forbidden. We force an exit if this counter reaches RECURSION_LIMIT
76
77    2. Networking dead loops would not kill routers, but would really
78    kill network. IP hop limit plays role of "t->recursion" in this case,
79    if we copy it from packet being encapsulated to upper header.
80    It is very good solution, but it introduces two problems:
81
82    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
83      do not work over tunnels.
84    - traceroute does not work. I planned to relay ICMP from tunnel,
85      so that this problem would be solved and traceroute output
86      would even more informative. This idea appeared to be wrong:
87      only Linux complies to rfc1812 now (yes, guys, Linux is the only
88      true router now :-)), all routers (at least, in neighbourhood of mine)
89      return only 8 bytes of payload. It is the end.
90
91    Hence, if we want that OSPF worked or traceroute said something reasonable,
92    we should search for another solution.
93
94    One of them is to parse packet trying to detect inner encapsulation
95    made by our node. It is difficult or even impossible, especially,
96    taking into account fragmentation. To be short, ttl is not a solution at all.
97
98    Current solution: The solution was UNEXPECTEDLY SIMPLE.
99    We force DF flag on tunnels with preconfigured hop limit,
100    that is ALL. :-) Well, it does not remove the problem completely,
101    but exponential growth of network traffic is changed to linear
102    (branches, that exceed pmtu are pruned) and tunnel mtu
103    rapidly degrades to value <68, where looping stops.
104    Yes, it is not good if there exists a router in the loop,
105    which does not force DF, even when encapsulating packets have DF set.
106    But it is not our problem! Nobody could accuse us, we made
107    all that we could make. Even if it is your gated who injected
108    fatal route to network, even if it were you who configured
109    fatal static route: you are innocent. :-)
110
111
112
113    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
114    practically identical code. It would be good to glue them
115    together, but it is not very evident, how to make them modular.
116    sit is integral part of IPv6, ipip and gre are naturally modular.
117    We could extract common parts (hash table, ioctl etc)
118    to a separate module (ip_tunnel.c).
119
120    Alexey Kuznetsov.
121  */
122
/* When true (the default), rate-limited log messages are emitted for
 * received packets whose ECN bits were corrupted in transit (see the
 * IP_ECN_decapsulate() handling in ipgre_rcv()).  Writable at runtime
 * via the module parameter (mode 0644). */
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* Forward declarations; the definitions appear later in this file. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);
131
/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE  16	/* buckets per hash table; HASH() yields 0..15 */

static int ipgre_net_id __read_mostly;	/* net_generic() slot for this module */

/* Per-network-namespace state for the GRE driver. */
struct ipgre_net {
	/* RCU-protected tunnel chains, indexed by [match specificity][hash];
	 * see the tunnel hash table comment below for the four tables. */
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	/* The namespace's fallback tunnel device (catches otherwise
	 * unmatched keyless packets). */
	struct net_device *fb_tunnel_dev;
};
142
143 /* Tunnel hash table */
144
145 /*
146    4 hash tables:
147
148    3: (remote,local)
149    2: (remote,*)
150    1: (*,local)
151    0: (*,*)
152
153    We require exact key match i.e. if a key is present in packet
154    it will match only tunnel with the same key; if it is not present,
155    it will match only keyless tunnel.
156
157    All keyless packets, if not matched to a configured keyless tunnel,
158    will match the fallback tunnel.
159  */
160
/* Fold a 32-bit address or key into a 4-bit hash bucket index (0..15).
 * The argument is fully parenthesized so that expression arguments
 * (not just plain lvalues) expand safely. */
#define HASH(addr) ((((__force u32)(addr)) ^ (((__force u32)(addr)) >> 4)) & 0xF)
162
/* Named views of the four hash tables, from most to least specific. */
#define tunnels_r_l     tunnels[3]	/* (remote, local) */
#define tunnels_r       tunnels[2]	/* (remote, *)     */
#define tunnels_l       tunnels[1]	/* (*, local)      */
#define tunnels_wc      tunnels[0]	/* (*, *) wildcard */
167
168 static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
169                                                    struct rtnl_link_stats64 *tot)
170 {
171         int i;
172
173         for_each_possible_cpu(i) {
174                 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
175                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
176                 unsigned int start;
177
178                 do {
179                         start = u64_stats_fetch_begin_bh(&tstats->syncp);
180                         rx_packets = tstats->rx_packets;
181                         tx_packets = tstats->tx_packets;
182                         rx_bytes = tstats->rx_bytes;
183                         tx_bytes = tstats->tx_bytes;
184                 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
185
186                 tot->rx_packets += rx_packets;
187                 tot->tx_packets += tx_packets;
188                 tot->rx_bytes   += rx_bytes;
189                 tot->tx_bytes   += tx_bytes;
190         }
191
192         tot->multicast = dev->stats.multicast;
193         tot->rx_crc_errors = dev->stats.rx_crc_errors;
194         tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
195         tot->rx_length_errors = dev->stats.rx_length_errors;
196         tot->rx_frame_errors = dev->stats.rx_frame_errors;
197         tot->rx_errors = dev->stats.rx_errors;
198
199         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
200         tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
201         tot->tx_dropped = dev->stats.tx_dropped;
202         tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
203         tot->tx_errors = dev->stats.tx_errors;
204
205         return tot;
206 }
207
208 /* Does key in tunnel parameters match packet */
209 static bool ipgre_key_match(const struct ip_tunnel_parm *p,
210                             __be16 flags, __be32 key)
211 {
212         if (p->i_flags & GRE_KEY) {
213                 if (flags & GRE_KEY)
214                         return key == p->i_key;
215                 else
216                         return false;   /* key expected, none present */
217         } else
218                 return !(flags & GRE_KEY);
219 }
220
221 /* Given src, dst and key, find appropriate for input tunnel. */
222
223 static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
224                                              __be32 remote, __be32 local,
225                                              __be16 flags, __be32 key,
226                                              __be16 gre_proto)
227 {
228         struct net *net = dev_net(dev);
229         int link = dev->ifindex;
230         unsigned int h0 = HASH(remote);
231         unsigned int h1 = HASH(key);
232         struct ip_tunnel *t, *cand = NULL;
233         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
234         int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
235                        ARPHRD_ETHER : ARPHRD_IPGRE;
236         int score, cand_score = 4;
237
238         for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
239                 if (local != t->parms.iph.saddr ||
240                     remote != t->parms.iph.daddr ||
241                     !(t->dev->flags & IFF_UP))
242                         continue;
243
244                 if (!ipgre_key_match(&t->parms, flags, key))
245                         continue;
246
247                 if (t->dev->type != ARPHRD_IPGRE &&
248                     t->dev->type != dev_type)
249                         continue;
250
251                 score = 0;
252                 if (t->parms.link != link)
253                         score |= 1;
254                 if (t->dev->type != dev_type)
255                         score |= 2;
256                 if (score == 0)
257                         return t;
258
259                 if (score < cand_score) {
260                         cand = t;
261                         cand_score = score;
262                 }
263         }
264
265         for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
266                 if (remote != t->parms.iph.daddr ||
267                     !(t->dev->flags & IFF_UP))
268                         continue;
269
270                 if (!ipgre_key_match(&t->parms, flags, key))
271                         continue;
272
273                 if (t->dev->type != ARPHRD_IPGRE &&
274                     t->dev->type != dev_type)
275                         continue;
276
277                 score = 0;
278                 if (t->parms.link != link)
279                         score |= 1;
280                 if (t->dev->type != dev_type)
281                         score |= 2;
282                 if (score == 0)
283                         return t;
284
285                 if (score < cand_score) {
286                         cand = t;
287                         cand_score = score;
288                 }
289         }
290
291         for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
292                 if ((local != t->parms.iph.saddr &&
293                      (local != t->parms.iph.daddr ||
294                       !ipv4_is_multicast(local))) ||
295                     !(t->dev->flags & IFF_UP))
296                         continue;
297
298                 if (!ipgre_key_match(&t->parms, flags, key))
299                         continue;
300
301                 if (t->dev->type != ARPHRD_IPGRE &&
302                     t->dev->type != dev_type)
303                         continue;
304
305                 score = 0;
306                 if (t->parms.link != link)
307                         score |= 1;
308                 if (t->dev->type != dev_type)
309                         score |= 2;
310                 if (score == 0)
311                         return t;
312
313                 if (score < cand_score) {
314                         cand = t;
315                         cand_score = score;
316                 }
317         }
318
319         for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
320                 if (t->parms.i_key != key ||
321                     !(t->dev->flags & IFF_UP))
322                         continue;
323
324                 if (t->dev->type != ARPHRD_IPGRE &&
325                     t->dev->type != dev_type)
326                         continue;
327
328                 score = 0;
329                 if (t->parms.link != link)
330                         score |= 1;
331                 if (t->dev->type != dev_type)
332                         score |= 2;
333                 if (score == 0)
334                         return t;
335
336                 if (score < cand_score) {
337                         cand = t;
338                         cand_score = score;
339                 }
340         }
341
342         if (cand != NULL)
343                 return cand;
344
345         dev = ign->fb_tunnel_dev;
346         if (dev->flags & IFF_UP)
347                 return netdev_priv(dev);
348
349         return NULL;
350 }
351
352 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
353                 struct ip_tunnel_parm *parms)
354 {
355         __be32 remote = parms->iph.daddr;
356         __be32 local = parms->iph.saddr;
357         __be32 key = parms->i_key;
358         unsigned int h = HASH(key);
359         int prio = 0;
360
361         if (local)
362                 prio |= 1;
363         if (remote && !ipv4_is_multicast(remote)) {
364                 prio |= 2;
365                 h ^= HASH(remote);
366         }
367
368         return &ign->tunnels[prio][h];
369 }
370
/* Convenience wrapper: hash bucket for an already-created tunnel. */
static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
		struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}
376
/* Insert tunnel @t at the head of its hash bucket.
 * Publication order matters for lockless RCU readers: t->next must be
 * set before the bucket head is made to point at @t.
 * rtnl_dereference() implies the caller holds RTNL. */
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
384
/* Remove tunnel @t from its hash bucket, if present.
 * Walks the chain keeping @tp pointing at the link that references the
 * current node, so the removal is a single RCU pointer update and
 * concurrent readers always see a consistent chain.
 * rtnl_dereference() implies the caller holds RTNL. */
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
399
400 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
401                                            struct ip_tunnel_parm *parms,
402                                            int type)
403 {
404         __be32 remote = parms->iph.daddr;
405         __be32 local = parms->iph.saddr;
406         __be32 key = parms->i_key;
407         int link = parms->link;
408         struct ip_tunnel *t;
409         struct ip_tunnel __rcu **tp;
410         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
411
412         for (tp = __ipgre_bucket(ign, parms);
413              (t = rtnl_dereference(*tp)) != NULL;
414              tp = &t->next)
415                 if (local == t->parms.iph.saddr &&
416                     remote == t->parms.iph.daddr &&
417                     key == t->parms.i_key &&
418                     link == t->parms.link &&
419                     type == t->dev->type)
420                         break;
421
422         return t;
423 }
424
425 static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
426                 struct ip_tunnel_parm *parms, int create)
427 {
428         struct ip_tunnel *t, *nt;
429         struct net_device *dev;
430         char name[IFNAMSIZ];
431         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
432
433         t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
434         if (t || !create)
435                 return t;
436
437         if (parms->name[0])
438                 strlcpy(name, parms->name, IFNAMSIZ);
439         else
440                 strcpy(name, "gre%d");
441
442         dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
443         if (!dev)
444                 return NULL;
445
446         dev_net_set(dev, net);
447
448         nt = netdev_priv(dev);
449         nt->parms = *parms;
450         dev->rtnl_link_ops = &ipgre_link_ops;
451
452         dev->mtu = ipgre_tunnel_bind_dev(dev);
453
454         if (register_netdevice(dev) < 0)
455                 goto failed_free;
456
457         /* Can use a lockless transmit, unless we generate output sequences */
458         if (!(nt->parms.o_flags & GRE_SEQ))
459                 dev->features |= NETIF_F_LLTX;
460
461         dev_hold(dev);
462         ipgre_tunnel_link(ign, nt);
463         return nt;
464
465 failed_free:
466         free_netdev(dev);
467         return NULL;
468 }
469
470 static void ipgre_tunnel_uninit(struct net_device *dev)
471 {
472         struct net *net = dev_net(dev);
473         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
474
475         ipgre_tunnel_unlink(ign, netdev_priv(dev));
476         dev_put(dev);
477 }
478
479
/* Handle an ICMP error received for a GRE packet this host sent.
 * Parses the copy of the original IP+GRE header carried in the ICMP
 * payload, looks up the tunnel that emitted the packet, and reacts:
 * PMTU update for FRAG_NEEDED, route update for redirects, otherwise
 * the error is recorded in the tunnel's soft state (err_count/err_time)
 * so it can be reported on a later transmit.
 */
static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means, that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put GRE key to the third word
   in GRE header. It makes impossible maintaining even soft state for keyed
   GRE tunnels with enabled checksum. Tell them "thank you".

   Well, I wonder, rfc1812 was written by Cisco employee,
   what the hell these idiots break standards established
   by themselves???
 */

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	__be16       *p = (__be16 *)(skb->data+(iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;	/* inner IP header + basic GRE header */
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;
	__be32 key = 0;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		/* Count the optional words up to and including the key, so
		 * that grehlen ends right after the key field. */
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes returned, keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	/* The key, when present, is the last word counted in grehlen. */
	if (flags & GRE_KEY)
		key = *(((__be32 *)p) + (grehlen / 4) - 1);

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	/* iph here is our original outgoing header, so its daddr is the
	 * tunnel's remote end and saddr the local end. */
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags, key, p[1]);

	if (t == NULL)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 t->parms.link, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
			      IPPROTO_GRE, 0);
		return;
	}
	/* NBMA and multicast tunnels have no single peer to blame. */
	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	/* Bump the error count, restarting it once the last error aged out. */
	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
580
581 static inline u8
582 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
583 {
584         u8 inner = 0;
585         if (skb->protocol == htons(ETH_P_IP))
586                 inner = old_iph->tos;
587         else if (skb->protocol == htons(ETH_P_IPV6))
588                 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
589         return INET_ECN_encapsulate(tos, inner);
590 }
591
/* GRE receive handler: parse the GRE header, locate the matching tunnel
 * and hand the decapsulated packet to the stack.
 *
 * Always returns 0: the packet is either delivered to the tunnel
 * device, answered with ICMP port-unreachable (no tunnel matched), or
 * dropped with the appropriate error counter bumped.
 */
static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;	/* basic GRE header: flags + protocol */
	__be16 gre_proto;
	int    err;

	/* Largest header we parse: 4 bytes + csum + key + seq = 16. */
	if (!pskb_may_pull(skb, 16))
		goto drop;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;	/* hardware checksum verified OK */
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		/* Optional fields appear in fixed order: csum, key, seq. */
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	tunnel = ipgre_tunnel_lookup(skb->dev,
				     iph->saddr, iph->daddr, flags, key,
				     gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			/* Not an IPv4 header yet: WCCPv2 redirect header. */
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* Strip the outer IP + GRE headers, fixing up the
		 * complete checksum for the pulled bytes. */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		/* Reject checksum failures, and checksum-less packets on
		 * tunnels that require a checksum. */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		/* Enforce in-order delivery when sequencing is configured;
		 * the signed difference handles sequence wrap-around. */
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			/* Re-read iph: pskb_may_pull may have moved data. */
			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (log_ecn_error)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &iph->saddr, iph->tos);
			/* err > 1 means the packet must be dropped, not
			 * merely logged. */
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				goto drop;
			}
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		gro_cells_receive(&tunnel->gro_cells, skb);
		return 0;
	}
	/* No tunnel matched: tell the sender the GRE port is unreachable. */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	kfree_skb(skb);
	return 0;
}
737
738 static struct sk_buff *handle_offloads(struct sk_buff *skb)
739 {
740         int err;
741
742         if (skb_is_gso(skb)) {
743                 err = skb_unclone(skb, GFP_ATOMIC);
744                 if (unlikely(err))
745                         goto error;
746                 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
747                 return skb;
748         }
749         if (skb->ip_summed != CHECKSUM_PARTIAL)
750                 skb->ip_summed = CHECKSUM_NONE;
751
752         return skb;
753
754 error:
755         kfree_skb(skb);
756         return ERR_PTR(err);
757 }
758
759 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
760 {
761         struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
762         struct ip_tunnel *tunnel = netdev_priv(dev);
763         const struct iphdr  *old_iph;
764         const struct iphdr  *tiph;
765         struct flowi4 fl4;
766         u8     tos;
767         __be16 df;
768         struct rtable *rt;                      /* Route to the other host */
769         struct net_device *tdev;                /* Device to other host */
770         struct iphdr  *iph;                     /* Our new IP header */
771         unsigned int max_headroom;              /* The extra header space needed */
772         int    gre_hlen;
773         __be32 dst;
774         int    mtu;
775         u8     ttl;
776         int    err;
777         int    pkt_len;
778
779         skb = handle_offloads(skb);
780         if (IS_ERR(skb)) {
781                 dev->stats.tx_dropped++;
782                 return NETDEV_TX_OK;
783         }
784
785         if (!skb->encapsulation) {
786                 skb_reset_inner_headers(skb);
787                 skb->encapsulation = 1;
788         }
789
790         old_iph = ip_hdr(skb);
791
792         if (dev->type == ARPHRD_ETHER)
793                 IPCB(skb)->flags = 0;
794
795         if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
796                 gre_hlen = 0;
797                 if (skb->protocol == htons(ETH_P_IP))
798                         tiph = (const struct iphdr *)skb->data;
799                 else
800                         tiph = &tunnel->parms.iph;
801         } else {
802                 gre_hlen = tunnel->hlen;
803                 tiph = &tunnel->parms.iph;
804         }
805
806         if ((dst = tiph->daddr) == 0) {
807                 /* NBMA tunnel */
808
809                 if (skb_dst(skb) == NULL) {
810                         dev->stats.tx_fifo_errors++;
811                         goto tx_error;
812                 }
813
814                 if (skb->protocol == htons(ETH_P_IP)) {
815                         rt = skb_rtable(skb);
816                         dst = rt_nexthop(rt, old_iph->daddr);
817                 }
818 #if IS_ENABLED(CONFIG_IPV6)
819                 else if (skb->protocol == htons(ETH_P_IPV6)) {
820                         const struct in6_addr *addr6;
821                         struct neighbour *neigh;
822                         bool do_tx_error_icmp;
823                         int addr_type;
824
825                         neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
826                         if (neigh == NULL)
827                                 goto tx_error;
828
829                         addr6 = (const struct in6_addr *)&neigh->primary_key;
830                         addr_type = ipv6_addr_type(addr6);
831
832                         if (addr_type == IPV6_ADDR_ANY) {
833                                 addr6 = &ipv6_hdr(skb)->daddr;
834                                 addr_type = ipv6_addr_type(addr6);
835                         }
836
837                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
838                                 do_tx_error_icmp = true;
839                         else {
840                                 do_tx_error_icmp = false;
841                                 dst = addr6->s6_addr32[3];
842                         }
843                         neigh_release(neigh);
844                         if (do_tx_error_icmp)
845                                 goto tx_error_icmp;
846                 }
847 #endif
848                 else
849                         goto tx_error;
850         }
851
852         ttl = tiph->ttl;
853         tos = tiph->tos;
854         if (tos & 0x1) {
855                 tos &= ~0x1;
856                 if (skb->protocol == htons(ETH_P_IP))
857                         tos = old_iph->tos;
858                 else if (skb->protocol == htons(ETH_P_IPV6))
859                         tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
860         }
861
862         rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
863                                  tunnel->parms.o_key, RT_TOS(tos),
864                                  tunnel->parms.link);
865         if (IS_ERR(rt)) {
866                 dev->stats.tx_carrier_errors++;
867                 goto tx_error;
868         }
869         tdev = rt->dst.dev;
870
871         if (tdev == dev) {
872                 ip_rt_put(rt);
873                 dev->stats.collisions++;
874                 goto tx_error;
875         }
876
877         df = tiph->frag_off;
878         if (df)
879                 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
880         else
881                 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
882
883         if (skb_dst(skb))
884                 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
885
886         if (skb->protocol == htons(ETH_P_IP)) {
887                 df |= (old_iph->frag_off&htons(IP_DF));
888
889                 if (!skb_is_gso(skb) &&
890                     (old_iph->frag_off&htons(IP_DF)) &&
891                     mtu < ntohs(old_iph->tot_len)) {
892                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
893                         ip_rt_put(rt);
894                         goto tx_error;
895                 }
896         }
897 #if IS_ENABLED(CONFIG_IPV6)
898         else if (skb->protocol == htons(ETH_P_IPV6)) {
899                 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
900
901                 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
902                         if ((tunnel->parms.iph.daddr &&
903                              !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
904                             rt6->rt6i_dst.plen == 128) {
905                                 rt6->rt6i_flags |= RTF_MODIFIED;
906                                 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
907                         }
908                 }
909
910                 if (!skb_is_gso(skb) &&
911                     mtu >= IPV6_MIN_MTU &&
912                     mtu < skb->len - tunnel->hlen + gre_hlen) {
913                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
914                         ip_rt_put(rt);
915                         goto tx_error;
916                 }
917         }
918 #endif
919
920         if (tunnel->err_count > 0) {
921                 if (time_before(jiffies,
922                                 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
923                         tunnel->err_count--;
924
925                         dst_link_failure(skb);
926                 } else
927                         tunnel->err_count = 0;
928         }
929
930         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
931
932         if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
933             (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
934                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
935                 if (max_headroom > dev->needed_headroom)
936                         dev->needed_headroom = max_headroom;
937                 if (!new_skb) {
938                         ip_rt_put(rt);
939                         dev->stats.tx_dropped++;
940                         dev_kfree_skb(skb);
941                         return NETDEV_TX_OK;
942                 }
943                 if (skb->sk)
944                         skb_set_owner_w(new_skb, skb->sk);
945                 dev_kfree_skb(skb);
946                 skb = new_skb;
947                 old_iph = ip_hdr(skb);
948                 /* Warning : tiph value might point to freed memory */
949         }
950
951         skb_push(skb, gre_hlen);
952         skb_reset_network_header(skb);
953         skb_set_transport_header(skb, sizeof(*iph));
954         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
955         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
956                               IPSKB_REROUTED);
957         skb_dst_drop(skb);
958         skb_dst_set(skb, &rt->dst);
959
960         /*
961          *      Push down and install the IPIP header.
962          */
963
964         iph                     =       ip_hdr(skb);
965         iph->version            =       4;
966         iph->ihl                =       sizeof(struct iphdr) >> 2;
967         iph->frag_off           =       df;
968         iph->protocol           =       IPPROTO_GRE;
969         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
970         iph->daddr              =       fl4.daddr;
971         iph->saddr              =       fl4.saddr;
972         iph->ttl                =       ttl;
973
974         tunnel_ip_select_ident(skb, old_iph, &rt->dst);
975
976         if (ttl == 0) {
977                 if (skb->protocol == htons(ETH_P_IP))
978                         iph->ttl = old_iph->ttl;
979 #if IS_ENABLED(CONFIG_IPV6)
980                 else if (skb->protocol == htons(ETH_P_IPV6))
981                         iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
982 #endif
983                 else
984                         iph->ttl = ip4_dst_hoplimit(&rt->dst);
985         }
986
987         ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
988         ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
989                                    htons(ETH_P_TEB) : skb->protocol;
990
991         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
992                 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
993
994                 if (tunnel->parms.o_flags&GRE_SEQ) {
995                         ++tunnel->o_seqno;
996                         *ptr = htonl(tunnel->o_seqno);
997                         ptr--;
998                 }
999                 if (tunnel->parms.o_flags&GRE_KEY) {
1000                         *ptr = tunnel->parms.o_key;
1001                         ptr--;
1002                 }
1003                 /* Skip GRE checksum if skb is getting offloaded. */
1004                 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
1005                     (tunnel->parms.o_flags&GRE_CSUM)) {
1006                         int offset = skb_transport_offset(skb);
1007
1008                         if (skb_has_shared_frag(skb)) {
1009                                 err = __skb_linearize(skb);
1010                                 if (err)
1011                                         goto tx_error;
1012                         }
1013
1014                         *ptr = 0;
1015                         *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
1016                                                                  skb->len - offset,
1017                                                                  0));
1018                 }
1019         }
1020
1021         nf_reset(skb);
1022
1023         pkt_len = skb->len - skb_transport_offset(skb);
1024         err = ip_local_out(skb);
1025         if (likely(net_xmit_eval(err) == 0)) {
1026                 u64_stats_update_begin(&tstats->syncp);
1027                 tstats->tx_bytes += pkt_len;
1028                 tstats->tx_packets++;
1029                 u64_stats_update_end(&tstats->syncp);
1030         } else {
1031                 dev->stats.tx_errors++;
1032                 dev->stats.tx_aborted_errors++;
1033         }
1034         return NETDEV_TX_OK;
1035
1036 #if IS_ENABLED(CONFIG_IPV6)
1037 tx_error_icmp:
1038         dst_link_failure(skb);
1039 #endif
1040 tx_error:
1041         dev->stats.tx_errors++;
1042         dev_kfree_skb(skb);
1043         return NETDEV_TX_OK;
1044 }
1045
1046 static int ipgre_tunnel_bind_dev(struct net_device *dev)
1047 {
1048         struct net_device *tdev = NULL;
1049         struct ip_tunnel *tunnel;
1050         const struct iphdr *iph;
1051         int hlen = LL_MAX_HEADER;
1052         int mtu = ETH_DATA_LEN;
1053         int addend = sizeof(struct iphdr) + 4;
1054
1055         tunnel = netdev_priv(dev);
1056         iph = &tunnel->parms.iph;
1057
1058         /* Guess output device to choose reasonable mtu and needed_headroom */
1059
1060         if (iph->daddr) {
1061                 struct flowi4 fl4;
1062                 struct rtable *rt;
1063
1064                 rt = ip_route_output_gre(dev_net(dev), &fl4,
1065                                          iph->daddr, iph->saddr,
1066                                          tunnel->parms.o_key,
1067                                          RT_TOS(iph->tos),
1068                                          tunnel->parms.link);
1069                 if (!IS_ERR(rt)) {
1070                         tdev = rt->dst.dev;
1071                         ip_rt_put(rt);
1072                 }
1073
1074                 if (dev->type != ARPHRD_ETHER)
1075                         dev->flags |= IFF_POINTOPOINT;
1076         }
1077
1078         if (!tdev && tunnel->parms.link)
1079                 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
1080
1081         if (tdev) {
1082                 hlen = tdev->hard_header_len + tdev->needed_headroom;
1083                 mtu = tdev->mtu;
1084         }
1085         dev->iflink = tunnel->parms.link;
1086
1087         /* Precalculate GRE options length */
1088         if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1089                 if (tunnel->parms.o_flags&GRE_CSUM)
1090                         addend += 4;
1091                 if (tunnel->parms.o_flags&GRE_KEY)
1092                         addend += 4;
1093                 if (tunnel->parms.o_flags&GRE_SEQ)
1094                         addend += 4;
1095         }
1096         dev->needed_headroom = addend + hlen;
1097         mtu -= dev->hard_header_len + addend;
1098
1099         if (mtu < 68)
1100                 mtu = 68;
1101
1102         tunnel->hlen = addend;
1103         /* TCP offload with GRE SEQ is not supported. */
1104         if (!(tunnel->parms.o_flags & GRE_SEQ)) {
1105                 /* device supports enc gso offload*/
1106                 if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
1107                         dev->features           |= NETIF_F_TSO;
1108                         dev->hw_features        |= NETIF_F_TSO;
1109                 } else {
1110                         dev->features           |= NETIF_F_GSO_SOFTWARE;
1111                         dev->hw_features        |= NETIF_F_GSO_SOFTWARE;
1112                 }
1113         }
1114
1115         return mtu;
1116 }
1117
/*
 * ipgre_tunnel_ioctl - legacy ioctl interface (SIOCGETTUNNEL,
 * SIOCADDTUNNEL, SIOCCHGTUNNEL, SIOCDELTUNNEL).
 *
 * On the per-namespace fallback device ("gre0") GET/DEL operate on the
 * tunnel described by the user-supplied ip_tunnel_parm; on a regular
 * tunnel device they act on that device itself.  ADD/CHG/DEL require
 * CAP_NET_ADMIN in the device's network namespace.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			/* Fallback device: look up the tunnel the caller
			 * described; if not found, report the fallback's own
			 * parameters below.
			 */
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Accept only plain IPv4/GRE parameters; GRE version and
		 * routing bits are not implemented by this driver.
		 */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		/* Keys are only meaningful when the matching GRE_KEY flag
		 * is set; normalize so hash lookups stay consistent.
		 */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* Parameters already belong to some device;
				 * only OK if it is this very device.
				 */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* The new addresses must not flip the device
				 * between broadcast and point-to-point mode.
				 */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/* Re-hash under the new addresses/keys:
				 * unlink, wait for readers, update, relink.
				 */
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					/* New underlying link may change
					 * headroom and MTU; rebind.
					 */
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			/* Deleting via the fallback device: find the target
			 * tunnel; the fallback itself may never be deleted.
			 */
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1248
1249 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1250 {
1251         struct ip_tunnel *tunnel = netdev_priv(dev);
1252         if (new_mtu < 68 ||
1253             new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1254                 return -EINVAL;
1255         dev->mtu = new_mtu;
1256         return 0;
1257 }
1258
1259 /* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
1261    over the Internet, provided multicast routing is tuned.
1262
1263
   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
1266    I have an impression, that Cisco could make something similar,
1267    but this feature is apparently missing in IOS<=11.2(8).
1268
1269    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1270    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1271
1272    ping -t 255 224.66.66.66
1273
1274    If nobody answers, mbone does not work.
1275
1276    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1277    ip addr add 10.66.66.<somewhat>/24 dev Universe
1278    ifconfig Universe up
1279    ifconfig Universe add fe80::<Your_real_addr>/10
1280    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1281    ftp 10.66.66.66
1282    ...
1283    ftp fec0:6666:6666::193.233.7.65
1284    ...
1285
1286  */
1287
1288 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1289                         unsigned short type,
1290                         const void *daddr, const void *saddr, unsigned int len)
1291 {
1292         struct ip_tunnel *t = netdev_priv(dev);
1293         struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1294         __be16 *p = (__be16 *)(iph+1);
1295
1296         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1297         p[0]            = t->parms.o_flags;
1298         p[1]            = htons(type);
1299
1300         /*
1301          *      Set the source hardware address.
1302          */
1303
1304         if (saddr)
1305                 memcpy(&iph->saddr, saddr, 4);
1306         if (daddr)
1307                 memcpy(&iph->daddr, daddr, 4);
1308         if (iph->daddr)
1309                 return t->hlen;
1310
1311         return -t->hlen;
1312 }
1313
1314 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1315 {
1316         const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1317         memcpy(haddr, &iph->saddr, 4);
1318         return 4;
1319 }
1320
/* Header ops installed on broadcast/NBMA GRE devices (see ipgre_tunnel_init). */
static const struct header_ops ipgre_header_ops = {
	.create = ipgre_header,
	.parse  = ipgre_header_parse,
};
1325
1326 #ifdef CONFIG_NET_IPGRE_BROADCAST
1327 static int ipgre_open(struct net_device *dev)
1328 {
1329         struct ip_tunnel *t = netdev_priv(dev);
1330
1331         if (ipv4_is_multicast(t->parms.iph.daddr)) {
1332                 struct flowi4 fl4;
1333                 struct rtable *rt;
1334
1335                 rt = ip_route_output_gre(dev_net(dev), &fl4,
1336                                          t->parms.iph.daddr,
1337                                          t->parms.iph.saddr,
1338                                          t->parms.o_key,
1339                                          RT_TOS(t->parms.iph.tos),
1340                                          t->parms.link);
1341                 if (IS_ERR(rt))
1342                         return -EADDRNOTAVAIL;
1343                 dev = rt->dst.dev;
1344                 ip_rt_put(rt);
1345                 if (__in_dev_get_rtnl(dev) == NULL)
1346                         return -EADDRNOTAVAIL;
1347                 t->mlink = dev->ifindex;
1348                 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1349         }
1350         return 0;
1351 }
1352
1353 static int ipgre_close(struct net_device *dev)
1354 {
1355         struct ip_tunnel *t = netdev_priv(dev);
1356
1357         if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1358                 struct in_device *in_dev;
1359                 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1360                 if (in_dev)
1361                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1362         }
1363         return 0;
1364 }
1365
1366 #endif
1367
/* Operations for plain (layer-3, ARPHRD_IPGRE) tunnel devices. */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
1380
/*
 * ipgre_dev_free - netdev destructor: release per-tunnel resources
 * (GRO cells, per-cpu stats) before freeing the device itself.
 */
static void ipgre_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(dev->tstats);
	free_netdev(dev);
}
1389
/* Offload features a GRE device advertises by default. */
#define GRE_FEATURES (NETIF_F_SG |		\
		      NETIF_F_FRAGLIST |	\
		      NETIF_F_HIGHDMA |		\
		      NETIF_F_HW_CSUM)
1394
1395 static void ipgre_tunnel_setup(struct net_device *dev)
1396 {
1397         dev->netdev_ops         = &ipgre_netdev_ops;
1398         dev->destructor         = ipgre_dev_free;
1399
1400         dev->type               = ARPHRD_IPGRE;
1401         dev->needed_headroom    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1402         dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1403         dev->flags              = IFF_NOARP;
1404         dev->iflink             = 0;
1405         dev->addr_len           = 4;
1406         dev->features           |= NETIF_F_NETNS_LOCAL;
1407         dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
1408
1409         dev->features           |= GRE_FEATURES;
1410         dev->hw_features        |= GRE_FEATURES;
1411 }
1412
1413 static int ipgre_tunnel_init(struct net_device *dev)
1414 {
1415         struct ip_tunnel *tunnel;
1416         struct iphdr *iph;
1417         int err;
1418
1419         tunnel = netdev_priv(dev);
1420         iph = &tunnel->parms.iph;
1421
1422         tunnel->dev = dev;
1423         strcpy(tunnel->parms.name, dev->name);
1424
1425         memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1426         memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1427
1428         if (iph->daddr) {
1429 #ifdef CONFIG_NET_IPGRE_BROADCAST
1430                 if (ipv4_is_multicast(iph->daddr)) {
1431                         if (!iph->saddr)
1432                                 return -EINVAL;
1433                         dev->flags = IFF_BROADCAST;
1434                         dev->header_ops = &ipgre_header_ops;
1435                 }
1436 #endif
1437         } else
1438                 dev->header_ops = &ipgre_header_ops;
1439
1440         dev->tstats = alloc_percpu(struct pcpu_tstats);
1441         if (!dev->tstats)
1442                 return -ENOMEM;
1443
1444         err = gro_cells_init(&tunnel->gro_cells, dev);
1445         if (err) {
1446                 free_percpu(dev->tstats);
1447                 return err;
1448         }
1449
1450         return 0;
1451 }
1452
1453 static void ipgre_fb_tunnel_init(struct net_device *dev)
1454 {
1455         struct ip_tunnel *tunnel = netdev_priv(dev);
1456         struct iphdr *iph = &tunnel->parms.iph;
1457
1458         tunnel->dev = dev;
1459         strcpy(tunnel->parms.name, dev->name);
1460
1461         iph->version            = 4;
1462         iph->protocol           = IPPROTO_GRE;
1463         iph->ihl                = 5;
1464         tunnel->hlen            = sizeof(struct iphdr) + 4;
1465
1466         dev_hold(dev);
1467 }
1468
1469
/* Receive and ICMP-error handlers registered for the GRE protocol. */
static const struct gre_protocol ipgre_protocol = {
	.handler     = ipgre_rcv,
	.err_handler = ipgre_err,
};
1474
1475 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1476 {
1477         int prio;
1478
1479         for (prio = 0; prio < 4; prio++) {
1480                 int h;
1481                 for (h = 0; h < HASH_SIZE; h++) {
1482                         struct ip_tunnel *t;
1483
1484                         t = rtnl_dereference(ign->tunnels[prio][h]);
1485
1486                         while (t != NULL) {
1487                                 unregister_netdevice_queue(t->dev, head);
1488                                 t = rtnl_dereference(t->next);
1489                         }
1490                 }
1491         }
1492 }
1493
1494 static int __net_init ipgre_init_net(struct net *net)
1495 {
1496         struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1497         int err;
1498
1499         ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1500                                            ipgre_tunnel_setup);
1501         if (!ign->fb_tunnel_dev) {
1502                 err = -ENOMEM;
1503                 goto err_alloc_dev;
1504         }
1505         dev_net_set(ign->fb_tunnel_dev, net);
1506
1507         ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1508         ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1509
1510         if ((err = register_netdev(ign->fb_tunnel_dev)))
1511                 goto err_reg_dev;
1512
1513         rcu_assign_pointer(ign->tunnels_wc[0],
1514                            netdev_priv(ign->fb_tunnel_dev));
1515         return 0;
1516
1517 err_reg_dev:
1518         ipgre_dev_free(ign->fb_tunnel_dev);
1519 err_alloc_dev:
1520         return err;
1521 }
1522
1523 static void __net_exit ipgre_exit_net(struct net *net)
1524 {
1525         struct ipgre_net *ign;
1526         LIST_HEAD(list);
1527
1528         ign = net_generic(net, ipgre_net_id);
1529         rtnl_lock();
1530         ipgre_destroy_tunnels(ign, &list);
1531         unregister_netdevice_many(&list);
1532         rtnl_unlock();
1533 }
1534
/* Per-network-namespace init/exit hooks for the GRE tunnel module. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
1541
1542 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1543 {
1544         __be16 flags;
1545
1546         if (!data)
1547                 return 0;
1548
1549         flags = 0;
1550         if (data[IFLA_GRE_IFLAGS])
1551                 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1552         if (data[IFLA_GRE_OFLAGS])
1553                 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1554         if (flags & (GRE_VERSION|GRE_ROUTING))
1555                 return -EINVAL;
1556
1557         return 0;
1558 }
1559
1560 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1561 {
1562         __be32 daddr;
1563
1564         if (tb[IFLA_ADDRESS]) {
1565                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1566                         return -EINVAL;
1567                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1568                         return -EADDRNOTAVAIL;
1569         }
1570
1571         if (!data)
1572                 goto out;
1573
1574         if (data[IFLA_GRE_REMOTE]) {
1575                 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1576                 if (!daddr)
1577                         return -EINVAL;
1578         }
1579
1580 out:
1581         return ipgre_tunnel_validate(tb, data);
1582 }
1583
1584 static void ipgre_netlink_parms(struct nlattr *data[],
1585                                 struct ip_tunnel_parm *parms)
1586 {
1587         memset(parms, 0, sizeof(*parms));
1588
1589         parms->iph.protocol = IPPROTO_GRE;
1590
1591         if (!data)
1592                 return;
1593
1594         if (data[IFLA_GRE_LINK])
1595                 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1596
1597         if (data[IFLA_GRE_IFLAGS])
1598                 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);
1599
1600         if (data[IFLA_GRE_OFLAGS])
1601                 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);
1602
1603         if (data[IFLA_GRE_IKEY])
1604                 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1605
1606         if (data[IFLA_GRE_OKEY])
1607                 parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1608
1609         if (data[IFLA_GRE_LOCAL])
1610                 parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);
1611
1612         if (data[IFLA_GRE_REMOTE])
1613                 parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);
1614
1615         if (data[IFLA_GRE_TTL])
1616                 parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);
1617
1618         if (data[IFLA_GRE_TOS])
1619                 parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
1620
1621         if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
1622                 parms->iph.frag_off = htons(IP_DF);
1623 }
1624
1625 static int ipgre_tap_init(struct net_device *dev)
1626 {
1627         struct ip_tunnel *tunnel;
1628
1629         tunnel = netdev_priv(dev);
1630
1631         tunnel->dev = dev;
1632         strcpy(tunnel->parms.name, dev->name);
1633
1634         ipgre_tunnel_bind_dev(dev);
1635
1636         dev->tstats = alloc_percpu(struct pcpu_tstats);
1637         if (!dev->tstats)
1638                 return -ENOMEM;
1639
1640         return 0;
1641 }
1642
/* Operations for gretap (Ethernet-over-GRE) devices. */
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats64	= ipgre_get_stats64,
};
1652
1653 static void ipgre_tap_setup(struct net_device *dev)
1654 {
1655
1656         ether_setup(dev);
1657
1658         dev->netdev_ops         = &ipgre_tap_netdev_ops;
1659         dev->destructor         = ipgre_dev_free;
1660
1661         dev->iflink             = 0;
1662         dev->features           |= NETIF_F_NETNS_LOCAL;
1663
1664         dev->features           |= GRE_FEATURES;
1665         dev->hw_features        |= GRE_FEATURES;
1666 }
1667
/*
 * rtnl_link_ops->newlink handler for "gre" and "gretap".
 *
 * Fills the tunnel parameters from the IFLA_GRE_* attributes, refuses
 * to create a duplicate of an existing tunnel, registers the device
 * and links it into the per-namespace tunnel hash.  Called under RTNL.
 */
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	/* A tunnel with the same keys/addresses/type already exists. */
	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	/* gretap devices get a random MAC unless the caller supplied one. */
	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	/* Bind to the underlying device/route; use the derived MTU only
	 * when the caller did not request an explicit one. */
	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* The tunnel hash keeps its own reference to the device. */
	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
1704
/*
 * rtnl_link_ops->changelink handler for "gre" and "gretap".
 *
 * Re-reads the IFLA_GRE_* attributes and applies them to an existing
 * tunnel.  The hash-key fields (saddr/daddr/i_key) may only change if
 * no other tunnel already claims the new tuple; changing them requires
 * unlinking and relinking the tunnel in the hash.  Called under RTNL.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	/* The fallback device's parameters are fixed. */
	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		/* New tuple is taken by a different device: refuse. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			/* The new destination must not flip the device
			 * between broadcast/p-t-p modes. */
			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		/* Rehash under the new key fields. */
		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	/* Non-key parameters can always be updated in place. */
	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		/* Rebinding may change the usable MTU; honour an
		 * explicit IFLA_MTU from the caller if present. */
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
1768
1769 static size_t ipgre_get_size(const struct net_device *dev)
1770 {
1771         return
1772                 /* IFLA_GRE_LINK */
1773                 nla_total_size(4) +
1774                 /* IFLA_GRE_IFLAGS */
1775                 nla_total_size(2) +
1776                 /* IFLA_GRE_OFLAGS */
1777                 nla_total_size(2) +
1778                 /* IFLA_GRE_IKEY */
1779                 nla_total_size(4) +
1780                 /* IFLA_GRE_OKEY */
1781                 nla_total_size(4) +
1782                 /* IFLA_GRE_LOCAL */
1783                 nla_total_size(4) +
1784                 /* IFLA_GRE_REMOTE */
1785                 nla_total_size(4) +
1786                 /* IFLA_GRE_TTL */
1787                 nla_total_size(1) +
1788                 /* IFLA_GRE_TOS */
1789                 nla_total_size(1) +
1790                 /* IFLA_GRE_PMTUDISC */
1791                 nla_total_size(1) +
1792                 0;
1793 }
1794
1795 static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1796 {
1797         struct ip_tunnel *t = netdev_priv(dev);
1798         struct ip_tunnel_parm *p = &t->parms;
1799
1800         if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1801             nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1802             nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1803             nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1804             nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1805             nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1806             nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1807             nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1808             nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1809             nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1810                        !!(p->iph.frag_off & htons(IP_DF))))
1811                 goto nla_put_failure;
1812         return 0;
1813
1814 nla_put_failure:
1815         return -EMSGSIZE;
1816 }
1817
/* Netlink attribute validation policy for IFLA_GRE_*.  LOCAL/REMOTE
 * are raw 4-byte IPv4 addresses, hence length-only validation. */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]         = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]       = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]       = { .type = NLA_U16 },
	[IFLA_GRE_IKEY]         = { .type = NLA_U32 },
	[IFLA_GRE_OKEY]         = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]        = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]       = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]          = { .type = NLA_U8 },
	[IFLA_GRE_TOS]          = { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]     = { .type = NLA_U8 },
};
1830
/* rtnetlink registration for plain "gre" (IP-framed) tunnels. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind           = "gre",
	.maxtype        = IFLA_GRE_MAX,
	.policy         = ipgre_policy,
	.priv_size      = sizeof(struct ip_tunnel),
	.setup          = ipgre_tunnel_setup,
	.validate       = ipgre_tunnel_validate,
	.newlink        = ipgre_newlink,
	.changelink     = ipgre_changelink,
	.get_size       = ipgre_get_size,
	.fill_info      = ipgre_fill_info,
};
1843
/* rtnetlink registration for "gretap" (ethernet-framed) tunnels; only
 * setup/validate differ from the plain gre link type. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind           = "gretap",
	.maxtype        = IFLA_GRE_MAX,
	.policy         = ipgre_policy,
	.priv_size      = sizeof(struct ip_tunnel),
	.setup          = ipgre_tap_setup,
	.validate       = ipgre_tap_validate,
	.newlink        = ipgre_newlink,
	.changelink     = ipgre_changelink,
	.get_size       = ipgre_get_size,
	.fill_info      = ipgre_fill_info,
};
1856
1857 /*
1858  *      And now the modules code and kernel interface.
1859  */
1860
1861 static int __init ipgre_init(void)
1862 {
1863         int err;
1864
1865         pr_info("GRE over IPv4 tunneling driver\n");
1866
1867         err = register_pernet_device(&ipgre_net_ops);
1868         if (err < 0)
1869                 return err;
1870
1871         err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1872         if (err < 0) {
1873                 pr_info("%s: can't add protocol\n", __func__);
1874                 goto add_proto_failed;
1875         }
1876
1877         err = rtnl_link_register(&ipgre_link_ops);
1878         if (err < 0)
1879                 goto rtnl_link_failed;
1880
1881         err = rtnl_link_register(&ipgre_tap_ops);
1882         if (err < 0)
1883                 goto tap_ops_failed;
1884
1885 out:
1886         return err;
1887
1888 tap_ops_failed:
1889         rtnl_link_unregister(&ipgre_link_ops);
1890 rtnl_link_failed:
1891         gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1892 add_proto_failed:
1893         unregister_pernet_device(&ipgre_net_ops);
1894         goto out;
1895 }
1896
/*
 * Module unload: tear down in reverse registration order — link types
 * first so no new tunnels can appear, then the protocol hook, then the
 * per-namespace state.
 */
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	unregister_pernet_device(&ipgre_net_ops);
}
1905
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Auto-load on "ip link add type gre|gretap" or access to gre0. */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");