/* net/ieee802154/6lowpan.c - 6LoWPAN adaptation layer (Linux 3.14) */
1 /*
2  * Copyright 2011, Siemens AG
3  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
4  */
5
6 /*
7  * Based on patches from Jon Smirl <jonsmirl@gmail.com>
8  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License along
20  * with this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23
24 /* Jon's code is based on 6lowpan implementation for Contiki which is:
25  * Copyright (c) 2008, Swedish Institute of Computer Science.
26  * All rights reserved.
27  *
28  * Redistribution and use in source and binary forms, with or without
29  * modification, are permitted provided that the following conditions
30  * are met:
31  * 1. Redistributions of source code must retain the above copyright
32  *    notice, this list of conditions and the following disclaimer.
33  * 2. Redistributions in binary form must reproduce the above copyright
34  *    notice, this list of conditions and the following disclaimer in the
35  *    documentation and/or other materials provided with the distribution.
36  * 3. Neither the name of the Institute nor the names of its contributors
37  *    may be used to endorse or promote products derived from this software
38  *    without specific prior written permission.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  */
52
53 #include <linux/bitops.h>
54 #include <linux/if_arp.h>
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/netdevice.h>
58 #include <net/af_ieee802154.h>
59 #include <net/ieee802154.h>
60 #include <net/ieee802154_netdev.h>
61 #include <net/ipv6.h>
62
63 #include "6lowpan.h"
64
/* all 6lowpan net_devices currently stacked on WPAN devices */
static LIST_HEAD(lowpan_devices);

/* private device info */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
	unsigned short		fragment_tag; /* tag for outgoing fragments */
};

/* one entry on lowpan_devices per stacked 6lowpan device */
struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

/* per-datagram reassembly state */
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

/* in-progress reassemblies, protected by flist_lock */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
90
/* return the 6lowpan private area of a (6lowpan) net_device */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
96
97 static inline void lowpan_address_flip(u8 *src, u8 *dest)
98 {
99         int i;
100         for (i = 0; i < IEEE802154_ADDR_LEN; i++)
101                 (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
102 }
103
/*
 * hard_header hook for the 6lowpan device: compress the IPv6 header in
 * @skb and prepend an IEEE 802.15.4 MAC header via the real WPAN
 * device's dev_hard_header().  Only ETH_P_IPV6 is handled; any other
 * @type returns 0 with the skb untouched.  @_saddr/@_daddr are 8-byte
 * link-layer addresses; a NULL source falls back to dev->dev_addr.
 */
static int lowpan_header_create(struct sk_buff *skb,
			   struct net_device *dev,
			   unsigned short type, const void *_daddr,
			   const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct ieee802154_addr sa, da;

	/* TODO:
	 * if this package isn't ipv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	if (!saddr)
		saddr = dev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	lowpan_header_compress(skb, dev, type, daddr, saddr, len);

	/*
	 * NOTE1: I'm still unsure about the fact that compression and WPAN
	 * header are created here and not later in the xmit. So wait for
	 * an opinion of net maintainers.
	 */
	/*
	 * NOTE2: to be absolutely correct, we must derive PANid information
	 * from MAC subif of the 'dev' and 'real_dev' network devices, but
	 * this isn't implemented in mainline yet, so currently we assign 0xff
	 */
	mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
	/* sequence number comes from the MLME of the virtual device */
	mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);

	/* prepare wpan address data */
	sa.addr_type = IEEE802154_ADDR_LONG;
	sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	memcpy(&(sa.hwaddr), saddr, 8);
	/* intra-PAN communications */
	da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);

	/*
	 * if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast(daddr)) {
		da.addr_type = IEEE802154_ADDR_SHORT;
		da.short_addr = IEEE802154_ADDR_BROADCAST;
	} else {
		da.addr_type = IEEE802154_ADDR_LONG;
		memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);

		/* request acknowledgment */
		mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
	}

	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
			type, (void *)&da, (void *)&sa, skb->len);
}
166
167 static int lowpan_give_skb_to_devices(struct sk_buff *skb,
168                                         struct net_device *dev)
169 {
170         struct lowpan_dev_record *entry;
171         struct sk_buff *skb_cp;
172         int stat = NET_RX_SUCCESS;
173
174         rcu_read_lock();
175         list_for_each_entry_rcu(entry, &lowpan_devices, list)
176                 if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
177                         skb_cp = skb_copy(skb, GFP_ATOMIC);
178                         if (!skb_cp) {
179                                 stat = -ENOMEM;
180                                 break;
181                         }
182
183                         skb_cp->dev = entry->ldev;
184                         stat = netif_rx(skb_cp);
185                 }
186         rcu_read_unlock();
187
188         return stat;
189 }
190
/*
 * Reassembly timeout handler (timer/softirq context): discard a
 * fragment queue that was not completed within LOWPAN_FRAG_TIMEOUT.
 *
 * NOTE(review): the entry is unlinked from lowpan_fragments without
 * holding flist_lock, while every other manipulation of that list does
 * take it -- this looks racy against process_data().  Taking the lock
 * here naively would deadlock with the del_timer_sync() call made
 * under flist_lock in process_data(), so the locking scheme needs a
 * real redesign -- TODO confirm.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	/* drop the partially assembled datagram and its bookkeeping */
	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
201
202 static struct lowpan_fragment *
203 lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
204 {
205         struct lowpan_fragment *frame;
206
207         frame = kzalloc(sizeof(struct lowpan_fragment),
208                         GFP_ATOMIC);
209         if (!frame)
210                 goto frame_err;
211
212         INIT_LIST_HEAD(&frame->list);
213
214         frame->length = len;
215         frame->tag = tag;
216
217         /* allocate buffer for frame assembling */
218         frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
219                                                sizeof(struct ipv6hdr));
220
221         if (!frame->skb)
222                 goto skb_err;
223
224         frame->skb->priority = skb->priority;
225
226         /* reserve headroom for uncompressed ipv6 header */
227         skb_reserve(frame->skb, sizeof(struct ipv6hdr));
228         skb_put(frame->skb, frame->length);
229
230         /* copy the first control block to keep a
231          * trace of the link-layer addresses in case
232          * of a link-local compressed address
233          */
234         memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
235
236         init_timer(&frame->timer);
237         /* time out is the same as for ipv6 - 60 sec */
238         frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
239         frame->timer.data = (unsigned long)frame;
240         frame->timer.function = lowpan_fragment_timer_expired;
241
242         add_timer(&frame->timer);
243
244         list_add_tail(&frame->list, &lowpan_fragments);
245
246         return frame;
247
248 skb_err:
249         kfree(frame);
250 frame_err:
251         return NULL;
252 }
253
254 static int process_data(struct sk_buff *skb)
255 {
256         u8 iphc0, iphc1;
257         const struct ieee802154_addr *_saddr, *_daddr;
258
259         raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
260         /* at least two bytes will be used for the encoding */
261         if (skb->len < 2)
262                 goto drop;
263
264         if (lowpan_fetch_skb_u8(skb, &iphc0))
265                 goto drop;
266
267         /* fragments assembling */
268         switch (iphc0 & LOWPAN_DISPATCH_MASK) {
269         case LOWPAN_DISPATCH_FRAG1:
270         case LOWPAN_DISPATCH_FRAGN:
271         {
272                 struct lowpan_fragment *frame;
273                 /* slen stores the rightmost 8 bits of the 11 bits length */
274                 u8 slen, offset = 0;
275                 u16 len, tag;
276                 bool found = false;
277
278                 if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
279                     lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
280                         goto drop;
281
282                 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
283                 len = ((iphc0 & 7) << 8) | slen;
284
285                 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
286                         pr_debug("%s received a FRAG1 packet (tag: %d, "
287                                  "size of the entire IP packet: %d)",
288                                  __func__, tag, len);
289                 } else { /* FRAGN */
290                         if (lowpan_fetch_skb_u8(skb, &offset))
291                                 goto unlock_and_drop;
292                         pr_debug("%s received a FRAGN packet (tag: %d, "
293                                  "size of the entire IP packet: %d, "
294                                  "offset: %d)", __func__, tag, len, offset * 8);
295                 }
296
297                 /*
298                  * check if frame assembling with the same tag is
299                  * already in progress
300                  */
301                 spin_lock_bh(&flist_lock);
302
303                 list_for_each_entry(frame, &lowpan_fragments, list)
304                         if (frame->tag == tag) {
305                                 found = true;
306                                 break;
307                         }
308
309                 /* alloc new frame structure */
310                 if (!found) {
311                         pr_debug("%s first fragment received for tag %d, "
312                                  "begin packet reassembly", __func__, tag);
313                         frame = lowpan_alloc_new_frame(skb, len, tag);
314                         if (!frame)
315                                 goto unlock_and_drop;
316                 }
317
318                 /* if payload fits buffer, copy it */
319                 if (likely((offset * 8 + skb->len) <= frame->length))
320                         skb_copy_to_linear_data_offset(frame->skb, offset * 8,
321                                                         skb->data, skb->len);
322                 else
323                         goto unlock_and_drop;
324
325                 frame->bytes_rcv += skb->len;
326
327                 /* frame assembling complete */
328                 if ((frame->bytes_rcv == frame->length) &&
329                      frame->timer.expires > jiffies) {
330                         /* if timer haven't expired - first of all delete it */
331                         del_timer_sync(&frame->timer);
332                         list_del(&frame->list);
333                         spin_unlock_bh(&flist_lock);
334
335                         pr_debug("%s successfully reassembled fragment "
336                                  "(tag %d)", __func__, tag);
337
338                         dev_kfree_skb(skb);
339                         skb = frame->skb;
340                         kfree(frame);
341
342                         if (lowpan_fetch_skb_u8(skb, &iphc0))
343                                 goto drop;
344
345                         break;
346                 }
347                 spin_unlock_bh(&flist_lock);
348
349                 return kfree_skb(skb), 0;
350         }
351         default:
352                 break;
353         }
354
355         if (lowpan_fetch_skb_u8(skb, &iphc1))
356                 goto drop;
357
358         _saddr = &mac_cb(skb)->sa;
359         _daddr = &mac_cb(skb)->da;
360
361         return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
362                                 _saddr->addr_type, IEEE802154_ADDR_LEN,
363                                 (u8 *)_daddr->hwaddr, _daddr->addr_type,
364                                 IEEE802154_ADDR_LEN, iphc0, iphc1,
365                                 lowpan_give_skb_to_devices);
366
367 unlock_and_drop:
368         spin_unlock_bh(&flist_lock);
369 drop:
370         kfree_skb(skb);
371         return -EINVAL;
372 }
373
374 static int lowpan_set_address(struct net_device *dev, void *p)
375 {
376         struct sockaddr *sa = p;
377
378         if (netif_running(dev))
379                 return -EBUSY;
380
381         /* TODO: validate addr */
382         memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
383
384         return 0;
385 }
386
/*
 * Build and send one fragment: the MAC header (@mlen bytes taken from
 * @skb's mac header), then the fragment header @head (FRAG1 or FRAGN
 * size chosen by @type), then @plen payload bytes taken @offset bytes
 * into @skb's network header.  Returns -ENOMEM on allocation failure,
 * otherwise the dev_queue_xmit() status.
 */
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
			int mlen, int plen, int offset, int type)
{
	struct sk_buff *frag;
	int hlen;

	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
			LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;

	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);

	/* room for MAC header, fragment header, payload and MAC footer */
	frag = netdev_alloc_skb(skb->dev,
				hlen + mlen + plen + IEEE802154_MFR_SIZE);
	if (!frag)
		return -ENOMEM;

	frag->priority = skb->priority;

	/* copy header, MFR and payload */
	skb_put(frag, mlen);
	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);

	skb_put(frag, hlen);
	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);

	skb_put(frag, plen);
	skb_copy_to_linear_data_offset(frag, mlen + hlen,
				       skb_network_header(skb) + offset, plen);

	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
421
/*
 * Split a datagram that exceeds a single 802.15.4 frame into a FRAG1
 * packet followed by FRAGN packets carrying LOWPAN_FRAG_SIZE payload
 * bytes each.  Returns 0 on success or the first transmit error.
 *
 * NOTE(review): fragment_tag++ is done without any locking here; if
 * xmit is not otherwise serialized, concurrent datagrams could share a
 * tag -- TODO confirm.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int  err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = skb->mac_len;
	payload_length = skb->len - header_length;
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header: 11-bit datagram size + 16-bit tag */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);

	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header: flip dispatch to FRAGN, keep size and tag */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	while (payload_length - offset > 0) {
		int len = LOWPAN_FRAG_SIZE;

		/* datagram offset field is expressed in 8-byte units */
		head[4] = offset / 8;

		if (payload_length - offset < len)
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}
475
/*
 * ndo_start_xmit for the lowpan device.  The skb already carries the
 * compressed payload and 802.15.4 MAC header built by
 * lowpan_header_create(); it is queued on the real WPAN device
 * directly when it fits one frame, otherwise it is fragmented first.
 * The skb is consumed on every path.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	/* fragments (if any) are independent copies; drop the original */
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	return (err < 0) ? NET_XMIT_DROP : err;
}
504
505 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
506 {
507         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
508         return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
509 }
510
511 static u16 lowpan_get_pan_id(const struct net_device *dev)
512 {
513         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
514         return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
515 }
516
517 static u16 lowpan_get_short_addr(const struct net_device *dev)
518 {
519         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
520         return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
521 }
522
523 static u8 lowpan_get_dsn(const struct net_device *dev)
524 {
525         struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
526         return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
527 }
528
/* only header creation is provided for the lowpan device */
static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};
532
/* dedicated lockdep keys for the virtual device's queue locks */
static struct lock_class_key lowpan_tx_busylock;
static struct lock_class_key lowpan_netdev_xmit_lock_key;

/* per-queue callback: assign the lowpan lockdep class to one tx lock */
static void lowpan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &lowpan_netdev_xmit_lock_key);
}
543
544
/* ndo_init: give every tx queue and the busylock their own lockdep class */
static int lowpan_dev_init(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
	return 0;
}
551
/* net_device callbacks of the 6lowpan virtual device */
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};

/* MLME ops: every query is forwarded to the real WPAN device */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
564
/* rtnl setup hook: initialise link-level parameters of a lowpan netdev */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	/* NOTE(review): 1281 is one above the IPv6 minimum MTU of 1280;
	 * looks like an off-by-one -- confirm the intended value.
	 */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	/* free the netdev automatically when it is unregistered */
	dev->destructor		= free_netdev;
}
583
584 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
585 {
586         if (tb[IFLA_ADDRESS]) {
587                 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
588                         return -EINVAL;
589         }
590         return 0;
591 }
592
/*
 * packet_type receive handler for ETH_P_IEEE802154 frames: dispatch on
 * the first 6lowpan byte.  An uncompressed IPv6 datagram is copied (to
 * realign the IPv6 header), stripped of the dispatch byte and given to
 * all stacked lowpan devices; IPHC/FRAG1/FRAGN payloads are cloned and
 * handed to process_data(), which consumes the clone.
 *
 * NOTE(review): in the inner switch's default case the skb is neither
 * freed nor delivered before returning NET_RX_SUCCESS -- this looks
 * like a leak for unknown dispatch values; confirm against packet_type
 * skb ownership rules.
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(local_skb, 1);

		lowpan_give_skb_to_devices(local_skb, NULL);

		/* lowpan_give_skb_to_devices() made per-device copies */
		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			process_data(local_skb);

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
647
648 static int lowpan_newlink(struct net *src_net, struct net_device *dev,
649                           struct nlattr *tb[], struct nlattr *data[])
650 {
651         struct net_device *real_dev;
652         struct lowpan_dev_record *entry;
653
654         pr_debug("adding new link\n");
655
656         if (!tb[IFLA_LINK])
657                 return -EINVAL;
658         /* find and hold real wpan device */
659         real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
660         if (!real_dev)
661                 return -ENODEV;
662         if (real_dev->type != ARPHRD_IEEE802154) {
663                 dev_put(real_dev);
664                 return -EINVAL;
665         }
666
667         lowpan_dev_info(dev)->real_dev = real_dev;
668         lowpan_dev_info(dev)->fragment_tag = 0;
669         mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
670
671         entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
672         if (!entry) {
673                 dev_put(real_dev);
674                 lowpan_dev_info(dev)->real_dev = NULL;
675                 return -ENOMEM;
676         }
677
678         entry->ldev = dev;
679
680         /* Set the lowpan harware address to the wpan hardware address. */
681         memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
682
683         mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
684         INIT_LIST_HEAD(&entry->list);
685         list_add_tail(&entry->list, &lowpan_devices);
686         mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
687
688         register_netdevice(dev);
689
690         return 0;
691 }
692
/*
 * rtnl dellink hook: remove every lowpan_devices record pointing at
 * @dev, queue the netdev for unregistration on @head and release the
 * reference on the real WPAN device taken in lowpan_newlink().
 * Must run under RTNL.
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
716
/* "ip link add ... type lowpan" glue */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
725
/* register the "lowpan" rtnl link type */
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

/* unregister the "lowpan" rtnl link type */
static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
735
/*
 * netdevice notifier: when a real WPAN device is unregistered, tear
 * down every 6lowpan device stacked on top of it.  lowpan_dellink()
 * contains ASSERT_RTNL(), so this path relies on the notifier being
 * invoked with the RTNL held.
 */
static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(del_list);
	struct lowpan_dev_record *entry, *tmp;

	/* only WPAN devices can have lowpan devices stacked on them */
	if (dev->type != ARPHRD_IEEE802154)
		goto out;

	if (event == NETDEV_UNREGISTER) {
		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
				lowpan_dellink(entry->ldev, &del_list);
		}

		/* batch-unregister everything dellink queued */
		unregister_netdevice_many(&del_list);
	}

out:
	return NOTIFY_DONE;
}
758
/* watch for real WPAN devices going away */
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

/* receive all IEEE 802.15.4 frames from the lower layer */
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
767
/*
 * module init: register the rtnl link type, the 802.15.4 packet
 * handler and the netdevice notifier, in that order; the first two are
 * unwound if notifier registration fails.
 */
static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out;

	dev_add_pack(&lowpan_packet_type);

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0) {
		dev_remove_pack(&lowpan_packet_type);
		lowpan_netlink_fini();
	}
out:
	return err;
}
786
/*
 * module exit: unregister in reverse order of registration, then free
 * the reassembly state of any datagrams that can no longer complete.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on RX, therefore that's the time to clean incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
810
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
/* allow "ip link add ... type lowpan" to autoload this module */
MODULE_ALIAS_RTNL_LINK("lowpan");