2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on net/ipv4/icmp.c
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
21 * Andi Kleen : exception handling
22 * Andi Kleen add rate limits. never reply to a icmp.
23 * add more length checks and other fixes.
24 * yoshfuji : ensure to sent parameter problem for
26 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
28 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
32 #define pr_fmt(fmt) "IPv6: " fmt
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 #include <linux/slab.h>
48 #include <linux/sysctl.h>
51 #include <linux/inet.h>
52 #include <linux/netdevice.h>
53 #include <linux/icmpv6.h>
59 #include <net/ip6_checksum.h>
61 #include <net/protocol.h>
63 #include <net/rawv6.h>
64 #include <net/transp_v6.h>
65 #include <net/ip6_route.h>
66 #include <net/addrconf.h>
69 #include <net/inet_common.h>
71 #include <asm/uaccess.h>
74 * The ICMP socket(s). This is the most convenient way to flow control
75 * our ICMP output as well as maintain a clean interface throughout
76 * all layers. All Socketless IP sends will soon be gone.
78 * On SMP we have one ICMP socket per-cpu.
/* Return this network namespace's per-CPU ICMPv6 control socket
 * (one socket per CPU, indexed by the current processor id). */
80 static inline struct sock *icmpv6_sk(struct net *net)
82 	return net->ipv6.icmp_sk[smp_processor_id()];
/* Error handler registered for the ICMPv6 protocol itself: reacts to an
 * ICMPv6 error that arrived about a previously sent ICMPv6 packet. */
85 static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
86 	u8 type, u8 code, int offset, __be32 info)
88 	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
89 	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
90 	struct net *net = dev_net(skb->dev);
/* Packet-too-big updates the path MTU cache; a redirect updates routing. */
92 	if (type == ICMPV6_PKT_TOOBIG)
93 		ip6_update_pmtu(skb, net, info, 0, 0);
94 	else if (type == NDISC_REDIRECT)
95 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
/* For error (non-informational) messages concerning one of our echo
 * requests, hand the error to the ping socket layer. */
97 	if (!(type & ICMPV6_INFOMSG_MASK))
98 		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
99 			ping_err(skb, offset, info);
102 static int icmpv6_rcv(struct sk_buff *skb);
/* Protocol descriptor hooking ICMPv6 into the IPv6 input path:
 * icmpv6_rcv handles incoming packets, icmpv6_err handles errors.
 * NOPOLICY/FINAL: skip xfrm policy check here; no further protocol
 * demux after this handler. */
104 static const struct inet6_protocol icmpv6_protocol = {
105 	.handler	=	icmpv6_rcv,
106 	.err_handler	=	icmpv6_err,
107 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/* Grab the per-CPU ICMPv6 socket for transmission.  Uses trylock because
 * the lock may already be held on this CPU (see comment below); the
 * recovery path for that case is outside the visible lines.
 * NOTE(review): function body is truncated in this view — the lock
 * acquisition preceding the trylock check is not shown. */
110 static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
117 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
118 		/* This can happen if the output path (f.e. SIT or
119 		 * ip6ip6 tunnel) signals dst_link_failure() for an
120 		 * outgoing ICMP6 packet.
/* Release the per-CPU ICMPv6 socket taken by icmpv6_xmit_lock(). */
128 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
130 	spin_unlock_bh(&sk->sk_lock.slock);
134 * Figure out, may we reply to this packet with icmp error.
136 * We do not reply, if:
137 * - it was icmp error message.
138 * - it is truncated, so that it is known, that protocol is ICMPV6
139 * (i.e. in the middle of some exthdr)
/* Decide whether we must NOT answer this packet with an ICMPv6 error:
 * returns true when the packet is itself an ICMPv6 error message (never
 * reply to an error with an error) — see the comment block above.
 * Walks past extension headers to find the transport protocol.
 * NOTE(review): several lines (len checks, return paths) are missing
 * from this view. */
144 static bool is_ineligible(const struct sk_buff *skb)
146 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
147 	int len = skb->len - ptr;
148 	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
154 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
157 	if (nexthdr == IPPROTO_ICMPV6) {
/* Read only the one-byte icmp6_type field to classify the message. */
159 		tp = skb_header_pointer(skb,
160 			ptr+offsetof(struct icmp6hdr, icmp6_type),
161 			sizeof(_type), &_type);
/* Error messages have the INFOMSG bit clear. */
163 		    !(*tp & ICMPV6_INFOMSG_MASK))
170 * Check the ICMP output rate limit
/* Rate-limit outgoing ICMPv6 error messages per destination peer.
 * Returns true when transmission is allowed.
 * NOTE(review): the return statements and dst_release path are
 * truncated in this view. */
172 static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
175 	struct dst_entry *dst;
176 	struct net *net = sock_net(sk);
179 	/* Informational messages are not limited. */
180 	if (type & ICMPV6_INFOMSG_MASK)
183 	/* Do not limit pmtu discovery, it would break it. */
184 	if (type == ICMPV6_PKT_TOOBIG)
188 	 * Look up the output route.
189 	 * XXX: perhaps the expire for routing entries cloned by
190 	 * this lookup should be more aggressive (not longer than timeout).
192 	dst = ip6_route_output(net, sk, fl6);
194 		IP6_INC_STATS(net, ip6_dst_idev(dst),
195 			      IPSTATS_MIB_OUTNOROUTES);
/* Loopback traffic is exempt from rate limiting (branch body truncated). */
196 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
199 		struct rt6_info *rt = (struct rt6_info *)dst;
200 		int tmo = net->ipv6.sysctl.icmpv6_time;
201 		struct inet_peer *peer;
203 		/* Give more bandwidth to wider prefixes. */
204 		if (rt->rt6i_dst.plen < 128)
205 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
/* Token-bucket style check is delegated to the shared inet_peer code. */
207 		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
208 		res = inet_peer_xrlim_allow(peer, tmo);
217 * an inline helper for the "simple" if statement below
218 * checks if parameter problem report is caused by an
219 * unrecognized IPv6 option that has the Option Type
220 * highest-order two bits set to 10
/* True when the parameter-problem report was caused by an unrecognized
 * IPv6 option whose Option Type has high-order bits 10 (see comment
 * block above): such options require an ICMP response even for
 * multicast destinations. */
223 static bool opt_unrec(struct sk_buff *skb, __u32 offset)
227 	offset += skb_network_offset(skb);
228 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
/* 0xC0 masks the two highest-order bits; 0x80 == binary 10. */
231 	return (*op & 0xC0) == 0x80;
/* Finalize a queued ICMPv6 message: copy in the header template,
 * compute the checksum over one or many queued skbs, and push the
 * pending frames out through the IPv6 output path.
 * NOTE(review): error/return lines are truncated in this view. */
234 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
235 			       struct icmp6hdr *thdr, int len)
238 	struct icmp6hdr *icmp6h;
241 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
244 	icmp6h = icmp6_hdr(skb);
245 	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
246 	icmp6h->icmp6_cksum = 0;
/* Single-skb fast path: fold the header into the skb's running csum. */
248 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
249 		skb->csum = csum_partial(icmp6h,
250 					sizeof(struct icmp6hdr), skb->csum);
251 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
253 						      len, fl6->flowi6_proto,
/* Multi-skb path: accumulate per-fragment checksums, then the header. */
258 		skb_queue_walk(&sk->sk_write_queue, skb) {
259 			tmp_csum = csum_add(tmp_csum, skb->csum);
262 		tmp_csum = csum_partial(icmp6h,
263 					sizeof(struct icmp6hdr), tmp_csum);
264 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
266 						      len, fl6->flowi6_proto,
269 	ip6_push_pending_frames(sk);
/* ip6_append_data() fragment callback: copy a slice of the original
 * (offending) packet into the outgoing ICMPv6 message while folding it
 * into the checksum.  For error messages, attach conntrack state from
 * the original skb. */
280 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
282 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
283 	struct sk_buff *org_skb = msg->skb;
286 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
288 	skb->csum = csum_block_add(skb->csum, csum, odd);
289 	if (!(msg->type & ICMPV6_INFOMSG_MASK))
290 		nf_ct_attach(skb, org_skb);
294 #if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Mobile IPv6: when the packet carried a Home Address Option, swap the
 * care-of source address with the home address from the HAO so the ICMP
 * error is addressed correctly.  No-op stub when MIP6 is disabled. */
295 static void mip6_addr_swap(struct sk_buff *skb)
297 	struct ipv6hdr *iph = ipv6_hdr(skb);
298 	struct inet6_skb_parm *opt = IP6CB(skb);
299 	struct ipv6_destopt_hao *hao;
304 		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
305 		if (likely(off >= 0)) {
306 			hao = (struct ipv6_destopt_hao *)
307 					(skb_network_header(skb) + off);
309 			iph->saddr = hao->addr;
315 static inline void mip6_addr_swap(struct sk_buff *skb) {}
/* Route lookup for an outgoing ICMPv6 error, including the XFRM (IPsec)
 * pass and the special relookup dance when the first xfrm_lookup is
 * denied (-EPERM): retry with a flow decoded from the reverse session.
 * Refuses anycast sources per RFC rules.
 * NOTE(review): several cleanup/return lines are truncated in this view. */
318 static struct dst_entry *icmpv6_route_lookup(struct net *net,
323 	struct dst_entry *dst, *dst2;
327 	err = ip6_dst_lookup(sk, &dst, fl6);
332 	 * We won't send icmp if the destination is known
335 	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
336 		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n");
338 		return ERR_PTR(-EINVAL);
341 	/* No need to clone since we're just using its address. */
344 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
/* Policy denied the direct lookup: try again via the reverse-decoded flow. */
349 		if (PTR_ERR(dst) == -EPERM)
355 	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
357 		goto relookup_failed;
359 	err = ip6_dst_lookup(sk, &dst2, &fl2);
361 		goto relookup_failed;
363 	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
373 	goto relookup_failed;
383 * Send an ICMP message in response to a packet in error
/* Build and transmit an ICMPv6 error message (type/code/info) in
 * response to the packet in @skb, subject to the eligibility and
 * rate-limit rules of RFC 1885/2463.
 * NOTE(review): many statements (returns, labels, some assignments)
 * are truncated in this view. */
385 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
387 	struct net *net = dev_net(skb->dev);
388 	struct inet6_dev *idev = NULL;
389 	struct ipv6hdr *hdr = ipv6_hdr(skb);
391 	struct ipv6_pinfo *np;
392 	const struct in6_addr *saddr = NULL;
393 	struct dst_entry *dst;
394 	struct icmp6hdr tmp_hdr;
396 	struct icmpv6_msg msg;
/* Sanity: the IPv6 header must lie fully within the skb's linear data. */
403 	if ((u8 *)hdr < skb->head ||
404 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
408 	 *	Make sure we respect the rules
409 	 *	i.e. RFC 1885 2.4(e)
410 	 *	Rule (e.1) is enforced by not using icmp6_send
411 	 *	in any code that processes icmp errors.
413 	addr_type = ipv6_addr_type(&hdr->daddr);
415 	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
/* Multicast / non-local destinations only get errors for PKT_TOOBIG or
 * a parameter problem on an option whose type bits demand a reply. */
422 	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
423 		if (type != ICMPV6_PKT_TOOBIG &&
424 		    !(type == ICMPV6_PARAMPROB &&
425 		      code == ICMPV6_UNK_OPTION &&
426 		      (opt_unrec(skb, info))))
432 	addr_type = ipv6_addr_type(&hdr->saddr);
438 	if (__ipv6_addr_needs_scope_id(addr_type))
439 		iif = skb->dev->ifindex;
442 	 *	Must not send error if the source does not uniquely
443 	 *	identify a single node (RFC2463 Section 2.4).
444 	 *	We check unspecified / multicast addresses here,
445 	 *	and anycast addresses will be checked later.
447 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
448 		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n");
453 	 *	Never answer to a ICMP packet.
455 	if (is_ineligible(skb)) {
456 		LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n");
/* Build the flow for the reply: swap direction, ICMPv6 proto/type/code. */
462 	memset(&fl6, 0, sizeof(fl6));
463 	fl6.flowi6_proto = IPPROTO_ICMPV6;
464 	fl6.daddr = hdr->saddr;
467 	fl6.flowi6_oif = iif;
468 	fl6.fl6_icmp_type = type;
469 	fl6.fl6_icmp_code = code;
470 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
472 	sk = icmpv6_xmit_lock(net);
477 	if (!icmpv6_xrlim_allow(sk, type, &fl6))
480 	tmp_hdr.icmp6_type = type;
481 	tmp_hdr.icmp6_code = code;
482 	tmp_hdr.icmp6_cksum = 0;
483 	tmp_hdr.icmp6_pointer = htonl(info);
/* Fall back to the socket's multicast/unicast oif when none was chosen. */
485 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
486 		fl6.flowi6_oif = np->mcast_oif;
487 	else if (!fl6.flowi6_oif)
488 		fl6.flowi6_oif = np->ucast_oif;
490 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
/* Hop limit: explicit socket setting, else derive from the route. */
494 	if (ipv6_addr_is_multicast(&fl6.daddr))
495 		hlimit = np->mcast_hops;
497 		hlimit = np->hop_limit;
499 		hlimit = ip6_dst_hoplimit(dst);
502 	msg.offset = skb_network_offset(skb);
/* Quote as much of the offending packet as fits in the minimum MTU. */
505 	len = skb->len - msg.offset;
506 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
508 		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
509 		goto out_dst_release;
513 	idev = __in6_dev_get(skb->dev);
515 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
516 			      len + sizeof(struct icmp6hdr),
517 			      sizeof(struct icmp6hdr), hlimit,
518 			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
519 			      MSG_DONTWAIT, np->dontfrag);
/* On append failure, count the error and drop the queued fragments;
 * otherwise checksum and transmit. */
521 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
522 		ip6_flush_pending_frames(sk);
524 		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
525 						 len + sizeof(struct icmp6hdr));
531 	icmpv6_xmit_unlock(sk);
534 /* Slightly more convenient version of icmp6_send.
/* Convenience wrapper: send an ICMPV6_PARAMPROB error pointing at
 * offset @pos in the offending packet. */
536 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
538 	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos);
/* Answer an ICMPv6 echo request: reuse the request's header (type
 * flipped to ECHO_REPLY), reverse the addresses, and send the payload
 * back via ip6_append_data.
 * NOTE(review): several lines (returns, labels, final arguments) are
 * truncated in this view. */
542 static void icmpv6_echo_reply(struct sk_buff *skb)
544 	struct net *net = dev_net(skb->dev);
546 	struct inet6_dev *idev;
547 	struct ipv6_pinfo *np;
548 	const struct in6_addr *saddr = NULL;
549 	struct icmp6hdr *icmph = icmp6_hdr(skb);
550 	struct icmp6hdr tmp_hdr;
552 	struct icmpv6_msg msg;
553 	struct dst_entry *dst;
/* Reply source = the request's destination address. */
557 	saddr = &ipv6_hdr(skb)->daddr;
/* Only reply when the request was addressed to us as unicast. */
559 	if (!ipv6_unicast_destination(skb))
562 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
563 	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
565 	memset(&fl6, 0, sizeof(fl6));
566 	fl6.flowi6_proto = IPPROTO_ICMPV6;
567 	fl6.daddr = ipv6_hdr(skb)->saddr;
570 	fl6.flowi6_oif = skb->dev->ifindex;
571 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
572 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
574 	sk = icmpv6_xmit_lock(net);
579 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
580 		fl6.flowi6_oif = np->mcast_oif;
581 	else if (!fl6.flowi6_oif)
582 		fl6.flowi6_oif = np->ucast_oif;
584 	err = ip6_dst_lookup(sk, &dst, &fl6);
587 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
591 	if (ipv6_addr_is_multicast(&fl6.daddr))
592 		hlimit = np->mcast_hops;
594 		hlimit = np->hop_limit;
596 		hlimit = ip6_dst_hoplimit(dst);
598 	idev = __in6_dev_get(skb->dev);
602 	msg.type = ICMPV6_ECHO_REPLY;
604 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
605 				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
606 				(struct rt6_info *)dst, MSG_DONTWAIT,
/* Append failed: account the error and discard the queued frames;
 * otherwise checksum and push the reply. */
610 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
611 		ip6_flush_pending_frames(sk);
613 		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
614 						 skb->len + sizeof(struct icmp6hdr));
618 	icmpv6_xmit_unlock(sk);
/* Demultiplex a received ICMPv6 error to the upper-layer protocol that
 * sent the offending packet: skip extension headers to locate the inner
 * transport protocol, then call its err_handler and notify raw sockets. */
621 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
623 	const struct inet6_protocol *ipprot;
/* The quoted packet must contain at least a full inner IPv6 header. */
628 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
631 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
632 	if (ipv6_ext_hdr(nexthdr)) {
633 		/* now skip over extension headers */
634 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
635 						&nexthdr, &frag_off);
639 		inner_offset = sizeof(struct ipv6hdr);
642 	/* Checkin header including 8 bytes of inner protocol header. */
643 	if (!pskb_may_pull(skb, inner_offset+8))
646 	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
647 	   Without this we will not able f.e. to make source routed
649 	   Corresponding argument (opt) to notifiers is already added.
/* Dispatch to the registered inet6 protocol handler, if any. */
654 	ipprot = rcu_dereference(inet6_protos[nexthdr]);
655 	if (ipprot && ipprot->err_handler)
656 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
659 	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
663 * Handle icmp messages
/* Main ICMPv6 input handler: validate policy and checksum, then
 * dispatch on the message type (echo, PMTU, errors, NDISC, MLD, ...).
 * NOTE(review): break/return/goto statements and some case bodies are
 * truncated in this view. */
666 static int icmpv6_rcv(struct sk_buff *skb)
668 	struct net_device *dev = skb->dev;
669 	struct inet6_dev *idev = __in6_dev_get(dev);
670 	const struct in6_addr *saddr, *daddr;
671 	struct icmp6hdr *hdr;
/* XFRM policy check; a reverse check is done below with the network
 * header temporarily moved past the ICMPv6 header. */
674 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
675 		struct sec_path *sp = skb_sec_path(skb);
678 		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
682 		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
685 		nh = skb_network_offset(skb);
686 		skb_set_network_header(skb, sizeof(*hdr));
688 		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
691 		skb_set_network_header(skb, nh);
694 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
696 	saddr = &ipv6_hdr(skb)->saddr;
697 	daddr = &ipv6_hdr(skb)->daddr;
699 	/* Perform checksum. */
700 	switch (skb->ip_summed) {
701 	case CHECKSUM_COMPLETE:
702 		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
/* Hardware did not verify: compute the full checksum in software. */
707 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
709 		if (__skb_checksum_complete(skb)) {
710 			LIMIT_NETDEBUG(KERN_DEBUG
711 				       "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
717 	if (!pskb_pull(skb, sizeof(*hdr)))
720 	hdr = icmp6_hdr(skb);
722 	type = hdr->icmp6_type;
724 	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
727 	case ICMPV6_ECHO_REQUEST:
728 		icmpv6_echo_reply(skb);
731 	case ICMPV6_ECHO_REPLY:
735 	case ICMPV6_PKT_TOOBIG:
736 		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
737 		   standard destination cache. Seems, only "advanced"
738 		   destination cache will allow to solve this problem
/* Re-validate: hdr may move after pskb_may_pull reallocates. */
741 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
743 		hdr = icmp6_hdr(skb);
746 		 * Drop through to notify
749 	case ICMPV6_DEST_UNREACH:
750 	case ICMPV6_TIME_EXCEED:
751 	case ICMPV6_PARAMPROB:
752 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
755 	case NDISC_ROUTER_SOLICITATION:
756 	case NDISC_ROUTER_ADVERTISEMENT:
757 	case NDISC_NEIGHBOUR_SOLICITATION:
758 	case NDISC_NEIGHBOUR_ADVERTISEMENT:
763 	case ICMPV6_MGM_QUERY:
764 		igmp6_event_query(skb);
767 	case ICMPV6_MGM_REPORT:
768 		igmp6_event_report(skb);
/* Message types we recognize but do not act on here. */
771 	case ICMPV6_MGM_REDUCTION:
772 	case ICMPV6_NI_QUERY:
773 	case ICMPV6_NI_REPLY:
774 	case ICMPV6_MLD2_REPORT:
775 	case ICMPV6_DHAAD_REQUEST:
776 	case ICMPV6_DHAAD_REPLY:
777 	case ICMPV6_MOBILE_PREFIX_SOL:
778 	case ICMPV6_MOBILE_PREFIX_ADV:
782 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
/* Unknown informational messages are silently ignored; unknown errors
 * must still be passed up to the transport protocols. */
785 		if (type & ICMPV6_INFOMSG_MASK)
789 		 * error of unknown type.
790 		 * must pass to upper level
793 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
800 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
802 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
/* Initialize a flowi6 for an outgoing ICMPv6 message of @type between
 * @saddr and @daddr on interface @oif, and classify it for LSM. */
808 void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
810 		      const struct in6_addr *saddr,
811 		      const struct in6_addr *daddr,
814 	memset(fl6, 0, sizeof(*fl6));
817 	fl6->flowi6_proto	= IPPROTO_ICMPV6;
818 	fl6->fl6_icmp_type	= type;
819 	fl6->fl6_icmp_code	= 0;
820 	fl6->flowi6_oif		= oif;
821 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
825 * Special lock-class for __icmpv6_sk:
827 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/* Per-netns init: allocate the per-CPU array of ICMPv6 control sockets
 * and create one kernel raw ICMPv6 socket per possible CPU, unwinding
 * on failure.
 * NOTE(review): some declarations and the success return are truncated
 * in this view. */
829 static int __net_init icmpv6_sk_init(struct net *net)
835 		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
836 	if (net->ipv6.icmp_sk == NULL)
839 	for_each_possible_cpu(i) {
840 		err = inet_ctl_sock_create(&sk, PF_INET6,
841 					   SOCK_RAW, IPPROTO_ICMPV6, net);
843 			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
848 		net->ipv6.icmp_sk[i] = sk;
851 		 * Split off their lock-class, because sk->sk_dst_lock
852 		 * gets used from softirqs, which is safe for
853 		 * __icmpv6_sk (because those never get directly used
854 		 * via userspace syscalls), but unsafe for normal sockets.
856 		lockdep_set_class(&sk->sk_dst_lock,
857 				  &icmpv6_socket_sk_dst_lock_key);
859 		/* Enough space for 2 64K ICMP packets, including
860 		 * sk_buff struct overhead.
862 		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/* Error unwind: destroy the sockets created so far, free the array. */
867 	for (j = 0; j < i; j++)
868 		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
869 	kfree(net->ipv6.icmp_sk);
/* Per-netns teardown: destroy every per-CPU control socket and free
 * the array allocated by icmpv6_sk_init(). */
873 static void __net_exit icmpv6_sk_exit(struct net *net)
877 	for_each_possible_cpu(i) {
878 		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
880 	kfree(net->ipv6.icmp_sk);
/* Register init/exit hooks so each network namespace gets its own set
 * of ICMPv6 control sockets. */
883 static struct pernet_operations icmpv6_sk_ops = {
884 	.init = icmpv6_sk_init,
885 	.exit = icmpv6_sk_exit,
/* Module init: register the per-netns socket ops, hook ICMPv6 into the
 * IPv6 protocol table, and register icmp6_send as the ICMP sender.
 * Unwinds each step on failure.
 * NOTE(review): success return and error labels are truncated in this
 * view. */
888 int __init icmpv6_init(void)
892 	err = register_pernet_subsys(&icmpv6_sk_ops);
897 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
900 	err = inet6_register_icmp_sender(icmp6_send);
906 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
908 	pr_err("Failed to register ICMP6 protocol\n");
909 	unregister_pernet_subsys(&icmpv6_sk_ops);
/* Module cleanup: undo icmpv6_init() registrations in reverse order. */
913 void icmpv6_cleanup(void)
915 	inet6_unregister_icmp_sender(icmp6_send);
916 	unregister_pernet_subsys(&icmpv6_sk_ops);
917 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/* Table mapping ICMPV6_DEST_UNREACH codes to (errno, fatal) pairs,
 * indexed by code in icmpv6_err_convert() below.
 * NOTE(review): most table entries are truncated in this view. */
921 static const struct icmp6_err {
929 	{	/* ADM_PROHIBITED */
933 	{	/* Was NOT_NEIGHBOUR, now reserved */
/* Translate an ICMPv6 error (type, code) into a Unix errno stored in
 * *err, and report via the return value whether the error is fatal for
 * the connection.
 * NOTE(review): default errno setup, break statements, and the final
 * return are truncated in this view. */
955 int icmpv6_err_convert(u8 type, u8 code, int *err)
962 	case ICMPV6_DEST_UNREACH:
/* Bounds-checked lookup in the tab_unreach table above. */
964 		if (code < ARRAY_SIZE(tab_unreach)) {
965 			*err  = tab_unreach[code].err;
966 			fatal = tab_unreach[code].fatal;
970 	case ICMPV6_PKT_TOOBIG:
974 	case ICMPV6_PARAMPROB:
979 	case ICMPV6_TIME_EXCEED:
986 EXPORT_SYMBOL(icmpv6_err_convert);
/* sysctl template for net.ipv6.icmp; the .data pointer is re-targeted
 * per-netns in ipv6_icmp_sysctl_init() below.  "ratelimit" holds the
 * ICMPv6 rate-limit interval, handled as milliseconds-to-jiffies. */
989 static struct ctl_table ipv6_icmp_table_template[] = {
991 		.procname	= "ratelimit",
992 		.data		= &init_net.ipv6.sysctl.icmpv6_time,
993 		.maxlen		= sizeof(int),
995 		.proc_handler	= proc_dointvec_ms_jiffies,
1000 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
1002 struct ctl_table *table;
1004 table = kmemdup(ipv6_icmp_table_template,
1005 sizeof(ipv6_icmp_table_template),
1009 table[0].data = &net->ipv6.sysctl.icmpv6_time;