net/batman-adv/send.c
1 /* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
2  *
3  * Marek Lindner, Simon Wunderlich
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of version 2 of the GNU General Public
7  * License as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include "main.h"
19 #include "distributed-arp-table.h"
20 #include "send.h"
21 #include "routing.h"
22 #include "translation-table.h"
23 #include "soft-interface.h"
24 #include "hard-interface.h"
25 #include "gateway_common.h"
26 #include "gateway_client.h"
27 #include "originator.h"
28 #include "network-coding.h"
29 #include "fragmentation.h"
30
31 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
32
33 /* send out an already prepared packet to the given address via the
34  * specified batman interface
35  */
36 int batadv_send_skb_packet(struct sk_buff *skb,
37                            struct batadv_hard_iface *hard_iface,
38                            const uint8_t *dst_addr)
39 {
40         struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
41         struct ethhdr *ethhdr;
42
43         if (hard_iface->if_status != BATADV_IF_ACTIVE)
44                 goto send_skb_err;
45
46         if (unlikely(!hard_iface->net_dev))
47                 goto send_skb_err;
48
49         if (!(hard_iface->net_dev->flags & IFF_UP)) {
50                 pr_warn("Interface %s is not up - can't send packet via that interface!\n",
51                         hard_iface->net_dev->name);
52                 goto send_skb_err;
53         }
54
55         /* push the skb head to make room for the ethernet header */
56         if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
57                 goto send_skb_err;
58
59         skb_reset_mac_header(skb);
60
61         ethhdr = eth_hdr(skb);
62         memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
63         memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
64         ethhdr->h_proto = htons(ETH_P_BATMAN);
65
66         skb_set_network_header(skb, ETH_HLEN);
67         skb->protocol = htons(ETH_P_BATMAN);
68
69         skb->dev = hard_iface->net_dev;
70
71         /* Save a clone of the skb to use when decoding coded packets */
72         batadv_nc_skb_store_for_decoding(bat_priv, skb);
73
74         /* dev_queue_xmit() returns a negative result on error.  However on
75          * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
76          * (which is > 0). This will not be treated as an error.
77          */
78         return dev_queue_xmit(skb);
79 send_skb_err:
80         kfree_skb(skb);
81         return NET_XMIT_DROP;
82 }
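/* A minimal caller sketch, not part of batman-adv: re-broadcast an already
 * prepared frame on one hard interface, mirroring the way
 * batadv_send_outstanding_bcast_packet() further below uses this helper.
 * The example_ prefix marks hypothetical code added for illustration.
 */
static int example_rebroadcast(struct sk_buff *skb,
                               struct batadv_hard_iface *hard_iface)
{
        struct sk_buff *clone;

        /* batadv_send_skb_packet() consumes the skb it is given, so hand it
         * a clone and keep the original for further retransmissions
         */
        clone = skb_clone(skb, GFP_ATOMIC);
        if (!clone)
                return NET_XMIT_DROP;

        return batadv_send_skb_packet(clone, hard_iface,
                                      batadv_broadcast_addr);
}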
83
84 /**
85  * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
86  * @skb: Packet to be transmitted.
87  * @orig_node: Final destination of the packet.
88  * @recv_if: Interface used when receiving the packet (can be NULL).
89  *
90  * Looks up the best next-hop towards the passed originator and passes the
91  * skb on for preparation of MAC header. If the packet originated from this
92  * host, NULL can be passed as recv_if and no interface alternating is
93  * attempted.
94  *
95  * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
96  * NET_XMIT_POLICED if the skb is buffered for later transmit.
97  */
98 int batadv_send_skb_to_orig(struct sk_buff *skb,
99                             struct batadv_orig_node *orig_node,
100                             struct batadv_hard_iface *recv_if)
101 {
102         struct batadv_priv *bat_priv = orig_node->bat_priv;
103         struct batadv_neigh_node *neigh_node;
104         int ret = NET_XMIT_DROP;
105
106         /* batadv_find_router() increases neigh_node's refcount if found. */
107         neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
108         if (!neigh_node)
109                 goto out;
110
111         /* Check if the skb is too large to send in one piece and fragment
112          * it if needed.
113          */
114         if (atomic_read(&bat_priv->fragmentation) &&
115             skb->len > neigh_node->if_incoming->net_dev->mtu) {
116                 /* Fragment and send packet. */
117                 if (batadv_frag_send_packet(skb, orig_node, neigh_node))
118                         ret = NET_XMIT_SUCCESS;
119
120                 goto out;
121         }
122
123         /* try to network code the packet if it was received on an interface
124          * (i.e. it is being forwarded). If the packet originates from this node
125          * or if network coding fails, then send the packet as usual.
126          */
127         if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
128                 ret = NET_XMIT_POLICED;
129         } else {
130                 batadv_send_skb_packet(skb, neigh_node->if_incoming,
131                                        neigh_node->addr);
132                 ret = NET_XMIT_SUCCESS;
133         }
134
135 out:
136         if (neigh_node)
137                 batadv_neigh_node_free_ref(neigh_node);
138
139         return ret;
140 }
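/* A hypothetical caller sketch (example_ names are not part of batman-adv):
 * route an already encapsulated frame towards its final originator. NULL is
 * passed as recv_if because the frame is assumed to originate on this node,
 * so no interface alternating is attempted. On failure the skb is released,
 * mirroring what batadv_send_skb_unicast() further below does.
 */
static int example_route_local_frame(struct sk_buff *skb,
                                     struct batadv_orig_node *orig_node)
{
        int ret;

        ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
        if (ret == NET_XMIT_DROP) {
                /* no next hop was found; the skb was not consumed */
                kfree_skb(skb);
        }

        return ret;
}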
141
142 /**
143  * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
144  *  common fields for unicast packets
145  * @skb: the skb carrying the unicast header to initialize
146  * @hdr_size: amount of bytes to push at the beginning of the skb
147  * @orig_node: the destination node
148  *
149  * Returns false if the buffer extension was not possible or true otherwise.
150  */
151 static bool
152 batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
153                                   struct batadv_orig_node *orig_node)
154 {
155         struct batadv_unicast_packet *unicast_packet;
156         uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
157
158         if (batadv_skb_head_push(skb, hdr_size) < 0)
159                 return false;
160
161         unicast_packet = (struct batadv_unicast_packet *)skb->data;
162         unicast_packet->version = BATADV_COMPAT_VERSION;
163         /* batman packet type: unicast */
164         unicast_packet->packet_type = BATADV_UNICAST;
165         /* set unicast ttl */
166         unicast_packet->ttl = BATADV_TTL;
167         /* copy the destination for faster routing */
168         memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
169         /* set the destination tt version number */
170         unicast_packet->ttvn = ttvn;
171
172         return true;
173 }
174
175 /**
176  * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
177  * @skb: the skb containing the payload to encapsulate
178  * @orig_node: the destination node
179  *
180  * Returns false if the payload could not be encapsulated or true otherwise.
181  */
182 static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
183                                             struct batadv_orig_node *orig_node)
184 {
185         size_t uni_size = sizeof(struct batadv_unicast_packet);
186
187         return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
188 }
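/* Hypothetical illustration (not a batman-adv function): encapsulate a
 * payload with a plain unicast header and check the fields that
 * batadv_send_skb_push_fill_unicast() initialized.
 */
static bool example_encap_plain_unicast(struct sk_buff *skb,
                                        struct batadv_orig_node *orig_node)
{
        struct batadv_unicast_packet *unicast_packet;

        if (!batadv_send_skb_prepare_unicast(skb, orig_node))
                return false;

        unicast_packet = (struct batadv_unicast_packet *)skb->data;

        /* version, packet type, TTL, destination and the destination's
         * translation table version number (ttvn) are now filled in
         */
        return unicast_packet->packet_type == BATADV_UNICAST &&
               unicast_packet->ttl == BATADV_TTL;
}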
189
190 /**
191  * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
192  *  unicast 4addr header
193  * @bat_priv: the bat priv with all the soft interface information
194  * @skb: the skb containing the payload to encapsulate
195  * @orig_node: the destination node
196  * @packet_subtype: the unicast 4addr packet subtype to use
197  *
198  * Returns false if the payload could not be encapsulated or true otherwise.
199  */
200 bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
201                                            struct sk_buff *skb,
202                                            struct batadv_orig_node *orig,
203                                            int packet_subtype)
204 {
205         struct batadv_hard_iface *primary_if;
206         struct batadv_unicast_4addr_packet *uc_4addr_packet;
207         bool ret = false;
208
209         primary_if = batadv_primary_if_get_selected(bat_priv);
210         if (!primary_if)
211                 goto out;
212
213         /* Push the header space and fill the unicast_packet substructure.
214          * We can do that because the first member of the uc_4addr_packet
215          * is of type struct batadv_unicast_packet
216          */
217         if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
218                                                orig))
219                 goto out;
220
221         uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
222         uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
223         memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
224         uc_4addr_packet->subtype = packet_subtype;
225         uc_4addr_packet->reserved = 0;
226
227         ret = true;
228 out:
229         if (primary_if)
230                 batadv_hardif_free_ref(primary_if);
231         return ret;
232 }
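/* Hypothetical sketch of the usual prepare-then-send sequence for a
 * unicast-4addr frame, roughly what batadv_send_skb_unicast() below does
 * for BATADV_UNICAST_4ADDR. The concrete BATADV_P_* subtype constants are
 * defined with the batman-adv packet formats and are passed through here
 * untouched; example_send_4addr() itself is not part of batman-adv.
 */
static int example_send_4addr(struct batadv_priv *bat_priv,
                              struct sk_buff *skb,
                              struct batadv_orig_node *orig_node,
                              int packet_subtype)
{
        int ret;

        if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
                                                   packet_subtype)) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }

        ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
        if (ret == NET_XMIT_DROP)
                kfree_skb(skb);

        return ret;
}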
233
234 /**
235  * batadv_send_skb_unicast - encapsulate and send an skb via unicast
236  * @bat_priv: the bat priv with all the soft interface information
237  * @skb: payload to send
238  * @packet_type: the batman unicast packet type to use
239  * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
240  *  4addr packets)
241  * @orig_node: the originator to send the packet to
242  * @vid: the vid to be used to search the translation table
243  *
244  * Wrap the given skb into a batman-adv unicast or unicast-4addr header
245  * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
246  * as packet_type. Then send this frame to the given orig_node and release a
247  * reference to this orig_node.
248  *
249  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
250  */
251 static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
252                                    struct sk_buff *skb, int packet_type,
253                                    int packet_subtype,
254                                    struct batadv_orig_node *orig_node,
255                                    unsigned short vid)
256 {
257         struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
258         struct batadv_unicast_packet *unicast_packet;
259         int ret = NET_XMIT_DROP;
260
261         if (!orig_node)
262                 goto out;
263
264         switch (packet_type) {
265         case BATADV_UNICAST:
266                 if (!batadv_send_skb_prepare_unicast(skb, orig_node))
267                         goto out;
268                 break;
269         case BATADV_UNICAST_4ADDR:
270                 if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
271                                                            orig_node,
272                                                            packet_subtype))
273                         goto out;
274                 break;
275         default:
276                 /* this function supports UNICAST and UNICAST_4ADDR only. It
277                  * should never be invoked with any other packet type
278                  */
279                 goto out;
280         }
281
282         unicast_packet = (struct batadv_unicast_packet *)skb->data;
283
284         /* inform the destination node that we are still missing a correct route
285          * for this client. The destination will receive this packet and will
286          * try to reroute it because the ttvn contained in the header is less
287          * than the current one
288          */
289         if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
290                 unicast_packet->ttvn = unicast_packet->ttvn - 1;
291
292         if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
293                 ret = NET_XMIT_SUCCESS;
294
295 out:
296         if (orig_node)
297                 batadv_orig_node_free_ref(orig_node);
298         if (ret == NET_XMIT_DROP)
299                 kfree_skb(skb);
300         return ret;
301 }
302
303 /**
304  * batadv_send_skb_via_tt_generic - send an skb via TT lookup
305  * @bat_priv: the bat priv with all the soft interface information
306  * @skb: payload to send
307  * @packet_type: the batman unicast packet type to use
308  * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
309  *  4addr packets)
310  * @vid: the vid to be used to search the translation table
311  *
312  * Look up the recipient node for the destination address in the ethernet
313  * header via the translation table. Wrap the given skb into a batman-adv
314  * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
315  * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
316  * to the according destination node.
317  *
318  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
319  */
320 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
321                                    struct sk_buff *skb, int packet_type,
322                                    int packet_subtype, uint8_t *dst_hint,
323                                    unsigned short vid)
324 {
325         struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
326         struct batadv_orig_node *orig_node;
327         uint8_t *src, *dst;
328
329         src = ethhdr->h_source;
330         dst = ethhdr->h_dest;
331
332         /* if we got a hint, send the packet to this client (if any) */
333         if (dst_hint) {
334                 src = NULL;
335                 dst = dst_hint;
336         }
337         orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
338
339         return batadv_send_skb_unicast(bat_priv, skb, packet_type,
340                                        packet_subtype, orig_node, vid);
341 }
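/* Hypothetical usage sketch: send a payload as a plain unicast frame and
 * let the translation table resolve the recipient from the ethernet header.
 * With dst_hint set the lookup would ignore the ethernet destination and
 * resolve the hint instead; here no hint is supplied. example_xmit_via_tt()
 * is not part of batman-adv.
 */
static int example_xmit_via_tt(struct batadv_priv *bat_priv,
                               struct sk_buff *skb, unsigned short vid)
{
        return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST,
                                              0, NULL, vid);
}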
342
343 /**
344  * batadv_send_skb_via_gw - send an skb via gateway lookup
345  * @bat_priv: the bat priv with all the soft interface information
346  * @skb: payload to send
347  * @vid: the vid to be used to search the translation table
348  *
349  * Look up the currently selected gateway. Wrap the given skb into a batman-adv
350  * unicast header and send this frame to this gateway node.
351  *
352  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
353  */
354 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
355                            unsigned short vid)
356 {
357         struct batadv_orig_node *orig_node;
358
359         orig_node = batadv_gw_get_selected_orig(bat_priv);
360         return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
361                                        orig_node, vid);
362 }
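/* Hypothetical sketch: a node acting as gateway client hands frames that
 * should leave the mesh (the classic example is DHCP traffic) to the
 * currently selected gateway instead of doing a translation table lookup.
 * Which frames get diverted is decided in the soft-interface transmit path,
 * not here; example_xmit_to_gateway() is not part of batman-adv.
 */
static int example_xmit_to_gateway(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, unsigned short vid)
{
        return batadv_send_skb_via_gw(bat_priv, skb, vid);
}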
363
364 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
365 {
366         struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
367
368         if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
369             (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
370                 return;
371
372         /* the interface gets activated here to avoid race conditions between
373          * the moment the interface is activated in
374          * hardif_activate_interface() (where the originator mac is set) and
375          * outdated packets (especially ones with uninitialized mac addresses)
376          * still sitting in the packet queue
377          */
378         if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
379                 hard_iface->if_status = BATADV_IF_ACTIVE;
380
381         bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
382 }
383
384 static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
385 {
386         if (forw_packet->skb)
387                 kfree_skb(forw_packet->skb);
388         if (forw_packet->if_incoming)
389                 batadv_hardif_free_ref(forw_packet->if_incoming);
390         if (forw_packet->if_outgoing)
391                 batadv_hardif_free_ref(forw_packet->if_outgoing);
392         kfree(forw_packet);
393 }
394
395 static void
396 _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
397                                  struct batadv_forw_packet *forw_packet,
398                                  unsigned long send_time)
399 {
400         /* add new packet to packet list */
401         spin_lock_bh(&bat_priv->forw_bcast_list_lock);
402         hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
403         spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
404
405         /* start timer for this packet */
406         queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
407                            send_time);
408 }
409
410 /* add a broadcast packet to the queue and set up timers. broadcast packets
411  * are sent multiple times to increase the probability of being received.
412  *
413  * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
414  * error.
415  *
416  * The skb is not consumed, so the caller should make sure that the
417  * skb is freed.
418  */
419 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
420                                     const struct sk_buff *skb,
421                                     unsigned long delay)
422 {
423         struct batadv_hard_iface *primary_if = NULL;
424         struct batadv_forw_packet *forw_packet;
425         struct batadv_bcast_packet *bcast_packet;
426         struct sk_buff *newskb;
427
428         if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
429                 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
430                            "bcast packet queue full\n");
431                 goto out;
432         }
433
434         primary_if = batadv_primary_if_get_selected(bat_priv);
435         if (!primary_if)
436                 goto out_and_inc;
437
438         forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
439
440         if (!forw_packet)
441                 goto out_and_inc;
442
443         newskb = skb_copy(skb, GFP_ATOMIC);
444         if (!newskb)
445                 goto packet_free;
446
447         /* as we have a copy now, it is safe to decrease the TTL */
448         bcast_packet = (struct batadv_bcast_packet *)newskb->data;
449         bcast_packet->ttl--;
450
451         skb_reset_mac_header(newskb);
452
453         forw_packet->skb = newskb;
454         forw_packet->if_incoming = primary_if;
455         forw_packet->if_outgoing = NULL;
456
457         /* how often did we send the bcast packet? */
458         forw_packet->num_packets = 0;
459
460         INIT_DELAYED_WORK(&forw_packet->delayed_work,
461                           batadv_send_outstanding_bcast_packet);
462
463         _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
464         return NETDEV_TX_OK;
465
466 packet_free:
467         kfree(forw_packet);
468 out_and_inc:
469         atomic_inc(&bat_priv->bcast_queue_left);
470 out:
471         if (primary_if)
472                 batadv_hardif_free_ref(primary_if);
473         return NETDEV_TX_BUSY;
474 }
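/* Hypothetical caller sketch: queue a broadcast for repeated transmission.
 * batadv_add_bcast_packet_to_list() copies the skb, so the caller keeps
 * ownership of its own reference in either case. The initial delay of 5ms
 * is an arbitrary value for illustration; example_queue_broadcast() is not
 * part of batman-adv.
 */
static int example_queue_broadcast(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb)
{
        int ret;

        ret = batadv_add_bcast_packet_to_list(bat_priv, skb,
                                              msecs_to_jiffies(5));

        /* a private copy was queued (or the queue was full); either way the
         * original skb still belongs to us and has to be released
         */
        if (ret == NETDEV_TX_OK)
                consume_skb(skb);
        else
                kfree_skb(skb);

        return ret;
}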
475
476 static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
477 {
478         struct batadv_hard_iface *hard_iface;
479         struct delayed_work *delayed_work;
480         struct batadv_forw_packet *forw_packet;
481         struct sk_buff *skb1;
482         struct net_device *soft_iface;
483         struct batadv_priv *bat_priv;
484
485         delayed_work = container_of(work, struct delayed_work, work);
486         forw_packet = container_of(delayed_work, struct batadv_forw_packet,
487                                    delayed_work);
488         soft_iface = forw_packet->if_incoming->soft_iface;
489         bat_priv = netdev_priv(soft_iface);
490
491         spin_lock_bh(&bat_priv->forw_bcast_list_lock);
492         hlist_del(&forw_packet->list);
493         spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
494
495         if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
496                 goto out;
497
498         if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
499                 goto out;
500
501         /* rebroadcast packet */
502         rcu_read_lock();
503         list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
504                 if (hard_iface->soft_iface != soft_iface)
505                         continue;
506
507                 if (forw_packet->num_packets >= hard_iface->num_bcasts)
508                         continue;
509
510                 /* send a copy of the saved skb */
511                 skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
512                 if (skb1)
513                         batadv_send_skb_packet(skb1, hard_iface,
514                                                batadv_broadcast_addr);
515         }
516         rcu_read_unlock();
517
518         forw_packet->num_packets++;
519
520         /* if we still have some more bcasts to send */
521         if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
522                 _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
523                                                  msecs_to_jiffies(5));
524                 return;
525         }
526
527 out:
528         batadv_forw_packet_free(forw_packet);
529         atomic_inc(&bat_priv->bcast_queue_left);
530 }
531
532 void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
533 {
534         struct delayed_work *delayed_work;
535         struct batadv_forw_packet *forw_packet;
536         struct batadv_priv *bat_priv;
537
538         delayed_work = container_of(work, struct delayed_work, work);
539         forw_packet = container_of(delayed_work, struct batadv_forw_packet,
540                                    delayed_work);
541         bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
542         spin_lock_bh(&bat_priv->forw_bat_list_lock);
543         hlist_del(&forw_packet->list);
544         spin_unlock_bh(&bat_priv->forw_bat_list_lock);
545
546         if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
547                 goto out;
548
549         bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
550
551         /* we have to have at least one packet in the queue to determine the
552          * queue's wake-up time unless we are shutting down.
553          *
554          * only re-schedule if this is the "original" copy: the OGM of the
555          * primary interface should only be rescheduled once per period, but
556          * this function will be called for the forw_packet instances of the
557          * other secondary interfaces as well.
558          */
559         if (forw_packet->own &&
560             forw_packet->if_incoming == forw_packet->if_outgoing)
561                 batadv_schedule_bat_ogm(forw_packet->if_incoming);
562
563 out:
564         /* don't count own packet */
565         if (!forw_packet->own)
566                 atomic_inc(&bat_priv->batman_queue_left);
567
568         batadv_forw_packet_free(forw_packet);
569 }
570
571 void
572 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
573                                  const struct batadv_hard_iface *hard_iface)
574 {
575         struct batadv_forw_packet *forw_packet;
576         struct hlist_node *safe_tmp_node;
577         bool pending;
578
579         if (hard_iface)
580                 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
581                            "purge_outstanding_packets(): %s\n",
582                            hard_iface->net_dev->name);
583         else
584                 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
585                            "purge_outstanding_packets()\n");
586
587         /* free bcast list */
588         spin_lock_bh(&bat_priv->forw_bcast_list_lock);
589         hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
590                                   &bat_priv->forw_bcast_list, list) {
591                 /* if purge_outstanding_packets() was called with an argument
592                  * we delete only packets belonging to the given interface
593                  */
594                 if ((hard_iface) &&
595                     (forw_packet->if_incoming != hard_iface))
596                         continue;
597
598                 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
599
600                 /* batadv_send_outstanding_bcast_packet() will lock the list to
601                  * delete the item from the list
602                  */
603                 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
604                 spin_lock_bh(&bat_priv->forw_bcast_list_lock);
605
606                 if (pending) {
607                         hlist_del(&forw_packet->list);
608                         batadv_forw_packet_free(forw_packet);
609                 }
610         }
611         spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
612
613         /* free batman packet list */
614         spin_lock_bh(&bat_priv->forw_bat_list_lock);
615         hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
616                                   &bat_priv->forw_bat_list, list) {
617                 /* if purge_outstanding_packets() was called with an argument
618                  * we delete only packets belonging to the given interface
619                  */
620                 if ((hard_iface) &&
621                     (forw_packet->if_incoming != hard_iface) &&
622                     (forw_packet->if_outgoing != hard_iface))
623                         continue;
624
625                 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
626
627                 /* batadv_send_outstanding_bat_ogm_packet() will lock the
628                  * list to delete the item from the list
629                  */
630                 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
631                 spin_lock_bh(&bat_priv->forw_bat_list_lock);
632
633                 if (pending) {
634                         hlist_del(&forw_packet->list);
635                         batadv_forw_packet_free(forw_packet);
636                 }
637         }
638         spin_unlock_bh(&bat_priv->forw_bat_list_lock);
639 }
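/* Hypothetical sketch: when a single hard interface is removed only the
 * packets queued on it are cancelled, while passing NULL (e.g. on mesh
 * shutdown) flushes both the broadcast and the OGM queue completely.
 * example_cleanup() is not part of batman-adv.
 */
static void example_cleanup(struct batadv_priv *bat_priv,
                            struct batadv_hard_iface *removed_iface)
{
        /* passing NULL purges the queues for all interfaces */
        batadv_purge_outstanding_packets(bat_priv, removed_iface);
}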