net/batman-adv/send.c
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
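/* example (assuming TQ_MAX_VALUE is 255 as defined in packet.h and a
 * configured hop_penalty of 10): an incoming tq of 200 is scaled to
 * (200 * (255 - 10)) / 255 = 192, so the advertised link quality
 * degrades slightly with every hop */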
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
        int hop_penalty = atomic_read(&bat_priv->hop_penalty);
        return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
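/* the next own OGM leaves after orig_interval plus a random jitter in
 * [-JITTER, +JITTER) ms; e.g. assuming JITTER = 20 (main.h) and an
 * orig_interval of 1000 ms, the send time falls 980..1019 ms from now.
 * Note the parentheses around 2 * JITTER below: "% 2 * JITTER" would
 * parse as "(random32() % 2) * JITTER" and yield only 0 or JITTER */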
static unsigned long own_send_time(const struct bat_priv *bat_priv)
{
        return jiffies + msecs_to_jiffies(
                   atomic_read(&bat_priv->orig_interval) -
                   JITTER + (random32() % (2 * JITTER)));
}

/* when do we schedule a forwarded packet to be sent */
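/* rebroadcasts are delayed by a random 0..JITTER/2 ms (0..9 ms assuming
 * JITTER = 20) so that neighbours forwarding the same OGM are less likely
 * to transmit at the very same moment */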
static unsigned long forward_send_time(void)
{
        return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
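/* returns the dev_queue_xmit() result on success; if the interface is down
 * or the ethernet header cannot be pushed, the skb is freed and
 * NET_XMIT_DROP is returned, so the skb is consumed in every case */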
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
                    const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via "
                           "that interface!\n", hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header */
        if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
                              struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;
        struct sk_buff *skb;

        if (hard_iface->if_status != IF_ACTIVE)
                return;

        packet_num = 0;
        buff_pos = 0;
        batman_packet = (struct batman_packet *)forw_packet->skb->data;

        /* adjust all flags and log packets */
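        /* an aggregated OGM buffer is a sequence of batman_packet headers,
         * each followed directly by its tt change entries; buff_pos walks
         * that sequence one header at a time */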
        while (aggregated_packet(buff_pos,
                                 forw_packet->packet_len,
                                 batman_packet->tt_num_changes)) {

                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
                    (forw_packet->if_incoming == hard_iface))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;

                fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
                                                            "Sending own" :
                                                            "Forwarding"));
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
                        " IDF %s, ttvn %d) on interface %s [%pM]\n",
                        fwd_str, (packet_num > 0 ? "aggregated " : ""),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
                         "on" : "off"),
                        batman_packet->ttvn, hard_iface->net_dev->name,
                        hard_iface->net_dev->dev_addr);

                buff_pos += sizeof(*batman_packet) +
                        tt_len(batman_packet->tt_num_changes);
                packet_num++;
                batman_packet = (struct batman_packet *)
                        (forw_packet->skb->data + buff_pos);
        }

        /* create clone because function is called more than once */
        skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
        if (skb)
                send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
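/* dispatches a scheduled OGM: our own OGMs for non-primary interfaces and
 * neighbour-only packets (DIRECTLINK with TTL 1) leave on the incoming
 * interface alone, everything else is broadcast on every hard interface
 * attached to this soft interface */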
static void send_packet(struct forw_packet *forw_packet)
{
        struct hard_iface *hard_iface;
        struct net_device *soft_iface;
        struct bat_priv *bat_priv;
        struct hard_iface *primary_if = NULL;
        struct batman_packet *batman_packet =
                (struct batman_packet *)(forw_packet->skb->data);
        int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

        if (!forw_packet->if_incoming) {
                pr_err("Error - can't forward packet: incoming iface not "
                       "specified\n");
                goto out;
        }

        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        if (forw_packet->if_incoming->if_status != IF_ACTIVE)
                goto out;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* multihomed peer assumed */
        /* non-primary OGMs are only broadcasted on their interface */
        if ((directlink && (batman_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

                /* FIXME: what about aggregated packets ? */
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s packet (originator %pM, seqno %d, TTL %d) "
                        "on interface %s [%pM]\n",
                        (forw_packet->own ? "Sending own" : "Forwarding"),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->ttl,
                        forw_packet->if_incoming->net_dev->name,
                        forw_packet->if_incoming->net_dev->dev_addr);

                /* skb is only used once and then forw_packet is freed */
                send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
                                broadcast_addr);
                forw_packet->skb = NULL;

                goto out;
        }

        /* broadcast on every interface */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                send_packet_to_if(forw_packet, hard_iface);
        }
        rcu_read_unlock();

out:
        if (primary_if)
                hardif_free_ref(primary_if);
}

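/* resize the OGM buffer; only the batman_packet header survives the copy,
 * the tt change area that follows it is rebuilt by the callers */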
static void realloc_packet_buffer(struct hard_iface *hard_iface,
                                  int new_len)
{
        unsigned char *new_buff;
        struct batman_packet *batman_packet;

        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc fails */
        if (new_buff) {
                memcpy(new_buff, hard_iface->packet_buff,
                       sizeof(*batman_packet));

                kfree(hard_iface->packet_buff);
                hard_iface->packet_buff = new_buff;
                hard_iface->packet_len = new_len;
        }
}

/* when calling this function (hard_iface == primary_if) has to be true */
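/* the buffer holds the OGM header followed by the pending tt changes; the
 * same changes are re-appended for up to TT_OGM_APPEND_MAX OGM intervals
 * (see the append counter handling in schedule_own_packet()) */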
static void prepare_packet_buffer(struct bat_priv *bat_priv,
                                  struct hard_iface *hard_iface)
{
        int new_len;
        struct batman_packet *batman_packet;

        new_len = BAT_PACKET_LEN +
                  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

        /* if we have too many changes for one packet don't send any
         * and wait for the tt table request which will be fragmented */
        if (new_len > hard_iface->soft_iface->mtu)
                new_len = BAT_PACKET_LEN;

        realloc_packet_buffer(hard_iface, new_len);
        batman_packet = (struct batman_packet *)hard_iface->packet_buff;

        atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

        /* reset the sending counter */
        atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

        batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
                                hard_iface->packet_buff + BAT_PACKET_LEN,
                                hard_iface->packet_len - BAT_PACKET_LEN);
}

static void reset_packet_buffer(struct bat_priv *bat_priv,
                                struct hard_iface *hard_iface)
{
        struct batman_packet *batman_packet;

        realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);

        batman_packet = (struct batman_packet *)hard_iface->packet_buff;
        batman_packet->tt_num_changes = 0;
}

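/* prepare and queue the next own OGM for this interface: refresh or reset
 * the tt change buffer (primary interface only), stamp seqno, ttvn, crc
 * and flags, and schedule the transmission at own_send_time() */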
void schedule_own_packet(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hard_iface *primary_if;
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = primary_if_get_selected(bat_priv);

        /**
         * the interface gets activated here to avoid race conditions
         * between the moment the originator mac is set in
         * hardif_activate_interface() and outdated packets (especially
         * ones with uninitialized mac addresses) still sitting in the
         * packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        if (hard_iface == primary_if) {
                /* if at least one change happened */
                if (atomic_read(&bat_priv->tt_local_changes) > 0) {
                        prepare_packet_buffer(bat_priv, hard_iface);
                        /* Increment the TTVN only once per OGM interval */
                        atomic_inc(&bat_priv->ttvn);
                        bat_priv->tt_poss_change = false;
                }

                /* if the changes have been sent enough times */
                if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
                        reset_packet_buffer(bat_priv, hard_iface);
        }

        /**
         * NOTE: packet_buff might just have been re-allocated in
         * prepare_packet_buffer() or in reset_packet_buffer()
         */
        batman_packet = (struct batman_packet *)hard_iface->packet_buff;

        /* change sequence number to network order */
        batman_packet->seqno =
                htonl((uint32_t)atomic_read(&hard_iface->seqno));

        batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
        batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));

        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags |= VIS_SERVER;
        else
                batman_packet->flags &= ~VIS_SERVER;

        if ((hard_iface == primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                                (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
        else
                batman_packet->gw_flags = NO_FLAGS;

        atomic_inc(&hard_iface->seqno);

        slide_own_bcast_window(hard_iface);
        send_time = own_send_time(bat_priv);
        add_bat_packet_to_list(bat_priv,
                               hard_iface->packet_buff,
                               hard_iface->packet_len,
                               hard_iface, 1, send_time);

        if (primary_if)
                hardif_free_ref(primary_if);
}

void schedule_forward_packet(struct orig_node *orig_node,
                             const struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             int directlink,
                             struct hard_iface *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct neigh_node *router;
        uint8_t in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;
        uint8_t tt_num_changes;

        if (batman_packet->ttl <= 1) {
                bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
                return;
        }

        router = orig_node_get_router(orig_node);

        in_tq = batman_packet->tq;
        in_ttl = batman_packet->ttl;
        tt_num_changes = batman_packet->tt_num_changes;

        batman_packet->ttl--;
        memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

        /* rebroadcast the tq of our best ranking neighbor to ensure the
         * rebroadcast carries our best tq value */
        if (router && router->tq_avg != 0) {

                /* rebroadcast ogm of best ranking neighbor as is */
                if (!compare_eth(router->addr, ethhdr->h_source)) {
                        batman_packet->tq = router->tq_avg;

                        if (router->last_ttl)
                                batman_packet->ttl = router->last_ttl - 1;
                }

                tq_avg = router->tq_avg;
        }

        if (router)
                neigh_node_free_ref(router);

        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: tq_orig: %i, tq_avg: %i, "
                "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
                in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
                batman_packet->ttl);

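        /* seqno and tt_crc were converted to host byte order on reception;
         * switch them back before the packet goes out on the wire again */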
        batman_packet->seqno = htonl(batman_packet->seqno);
        batman_packet->tt_crc = htons(batman_packet->tt_crc);

        /* switch off the primaries first hop flag when forwarding */
        batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
        if (directlink)
                batman_packet->flags |= DIRECTLINK;
        else
                batman_packet->flags &= ~DIRECTLINK;

        send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
                               sizeof(*batman_packet) + tt_len(tt_num_changes),
                               if_incoming, 0, send_time);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

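/* queue a prepared forw_packet on the broadcast list; send_time is a
 * relative delay in jiffies after which send_outstanding_bcast_packet()
 * fires for this entry */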
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
                             const struct sk_buff *skb, unsigned long delay)
{
        struct hard_iface *primary_if = NULL;
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)newskb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
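        /* each broadcast is transmitted three times in total, with a delay
         * of (5 * HZ) / 1000 jiffies (~5 ms, rounded down to whole jiffies)
         * between the retransmissions */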
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

void send_outstanding_bat_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        send_packet(forw_packet);

        /**
         * we have to have at least one packet in the queue to determine
         * the queue's wake-up time, so reschedule our own packet unless
         * we are shutting down
         */
        if (forw_packet->own)
                schedule_own_packet(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

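/* cancel all queued OGM and broadcast transmissions; the list locks are
 * dropped around cancel_delayed_work_sync() because the work callbacks
 * themselves take these locks to unlink their entry */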
void purge_outstanding_packets(struct bat_priv *bat_priv,
                               const struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /**
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /**
                 * send_outstanding_bat_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}