/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}
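
/* Usage sketch (editor annotation, not part of the original source): callers
 * hand over ownership of the skb, which is either transmitted via
 * dev_queue_xmit() or freed on error. A typical pattern - as in the
 * rebroadcast loop further down in this file - clones the saved buffer first:
 *
 *	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *	if (skb1)
 *		batadv_send_skb_packet(skb1, hard_iface, batadv_broadcast_addr);
 */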

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid a race condition between
         * the moment the interface is activated in
         * batadv_hardif_activate_interface(), where the originator mac is
         * set, and outdated packets (especially ones with uninitialized mac
         * addresses) still sitting in the packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}
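
/* Editor annotation: the delayed work queued above is serviced by
 * batadv_send_outstanding_bcast_packet() below, which recovers the
 * forw_packet via container_of() on the embedded delayed_work and then
 * rebroadcasts the stored skb.
 */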

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often did we send the bcast packet ? */
        forw_packet->num_packets = 0;

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}
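
/* Usage sketch (editor annotation, hypothetical caller): since only a copy of
 * the skb is queued, the caller keeps ownership of the original buffer and
 * has to free it itself, for example:
 *
 *	batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
 *	consume_skb(skb);
 *
 * consume_skb() runs unconditionally here because the original skb is never
 * handed over, whether queueing succeeded (NETDEV_TX_OK) or the queue was
 * full (NETDEV_TX_BUSY).
 */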

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_skb_packet(skb1, hard_iface,
                                               batadv_broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}
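
/* Editor annotation: with the values above, every queued broadcast is sent
 * three times in total (num_packets counts up to 3), the first transmission
 * after the caller-supplied delay and the rebroadcasts roughly 5 ms apart,
 * before the forw_packet is freed and its bcast_queue_left slot is returned.
 */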

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue
         * to determine the queue's wake up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}
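
/* Editor annotation: for the node's own OGMs (forw_packet->own), emitting the
 * packet immediately schedules the next one via batadv_schedule_bat_ogm(),
 * which is what keeps the periodic OGM transmission going; the actual
 * queueing happens in the routing algorithm's bat_ogm_schedule handler
 * (presumably in bat_iv_ogm.c). Only forwarded OGMs return a slot to
 * batman_queue_left here, since own packets are not counted against that
 * limit, as the comment above notes.
 */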

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}