]> Pileus Git - ~andy/linux/blob - net/batman-adv/translation-table.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
[~andy/linux] / net / batman-adv / translation-table.c
1 /*
2  * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3  *
4  * Marek Lindner, Simon Wunderlich
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of version 2 of the GNU General Public
8  * License as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA
19  *
20  */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hash.h"
26 #include "originator.h"
27
28 static void hna_local_purge(struct work_struct *work);
29 static void _hna_global_del_orig(struct bat_priv *bat_priv,
30                                  struct hna_global_entry *hna_global_entry,
31                                  char *message);
32
/* (Re)arm the periodic local-HNA purge, firing in 10 seconds.
 * NOTE(review): INIT_DELAYED_WORK is re-run on every rearm; that is
 * harmless while the work item is idle (it always is here, since this
 * is called from init and from the work function itself), but
 * initializing once in hna_local_init() would be cleaner - confirm
 * before changing, as it needs a coupled edit there. */
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
38
39 int hna_local_init(struct bat_priv *bat_priv)
40 {
41         if (bat_priv->hna_local_hash)
42                 return 1;
43
44         bat_priv->hna_local_hash = hash_new(1024);
45
46         if (!bat_priv->hna_local_hash)
47                 return 0;
48
49         atomic_set(&bat_priv->hna_local_changed, 0);
50         hna_local_start_timer(bat_priv);
51
52         return 1;
53 }
54
55 void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
56 {
57         struct bat_priv *bat_priv = netdev_priv(soft_iface);
58         struct hna_local_entry *hna_local_entry;
59         struct hna_global_entry *hna_global_entry;
60         int required_bytes;
61
62         spin_lock_bh(&bat_priv->hna_lhash_lock);
63         hna_local_entry =
64                 ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
65                                                      compare_orig, choose_orig,
66                                                      addr));
67         spin_unlock_bh(&bat_priv->hna_lhash_lock);
68
69         if (hna_local_entry) {
70                 hna_local_entry->last_seen = jiffies;
71                 return;
72         }
73
74         /* only announce as many hosts as possible in the batman-packet and
75            space in batman_packet->num_hna That also should give a limit to
76            MAC-flooding. */
77         required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
78         required_bytes += BAT_PACKET_LEN;
79
80         if ((required_bytes > ETH_DATA_LEN) ||
81             (atomic_read(&bat_priv->aggregated_ogms) &&
82              required_bytes > MAX_AGGREGATION_BYTES) ||
83             (bat_priv->num_local_hna + 1 > 255)) {
84                 bat_dbg(DBG_ROUTES, bat_priv,
85                         "Can't add new local hna entry (%pM): "
86                         "number of local hna entries exceeds packet size\n",
87                         addr);
88                 return;
89         }
90
91         bat_dbg(DBG_ROUTES, bat_priv,
92                 "Creating new local hna entry: %pM\n", addr);
93
94         hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
95         if (!hna_local_entry)
96                 return;
97
98         memcpy(hna_local_entry->addr, addr, ETH_ALEN);
99         hna_local_entry->last_seen = jiffies;
100
101         /* the batman interface mac address should never be purged */
102         if (compare_orig(addr, soft_iface->dev_addr))
103                 hna_local_entry->never_purge = 1;
104         else
105                 hna_local_entry->never_purge = 0;
106
107         spin_lock_bh(&bat_priv->hna_lhash_lock);
108
109         hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
110                  hna_local_entry);
111         bat_priv->num_local_hna++;
112         atomic_set(&bat_priv->hna_local_changed, 1);
113
114         spin_unlock_bh(&bat_priv->hna_lhash_lock);
115
116         /* remove address from global hash if present */
117         spin_lock_bh(&bat_priv->hna_ghash_lock);
118
119         hna_global_entry = ((struct hna_global_entry *)
120                                 hash_find(bat_priv->hna_global_hash,
121                                           compare_orig, choose_orig, addr));
122
123         if (hna_global_entry)
124                 _hna_global_del_orig(bat_priv, hna_global_entry,
125                                      "local hna received");
126
127         spin_unlock_bh(&bat_priv->hna_ghash_lock);
128 }
129
130 int hna_local_fill_buffer(struct bat_priv *bat_priv,
131                           unsigned char *buff, int buff_len)
132 {
133         struct hashtable_t *hash = bat_priv->hna_local_hash;
134         struct hna_local_entry *hna_local_entry;
135         struct element_t *bucket;
136         int i;
137         struct hlist_node *walk;
138         struct hlist_head *head;
139         int count = 0;
140
141         spin_lock_bh(&bat_priv->hna_lhash_lock);
142
143         for (i = 0; i < hash->size; i++) {
144                 head = &hash->table[i];
145
146                 hlist_for_each_entry(bucket, walk, head, hlist) {
147
148                         if (buff_len < (count + 1) * ETH_ALEN)
149                                 break;
150
151                         hna_local_entry = bucket->data;
152                         memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
153                                ETH_ALEN);
154
155                         count++;
156                 }
157         }
158
159         /* if we did not get all new local hnas see you next time  ;-) */
160         if (count == bat_priv->num_local_hna)
161                 atomic_set(&bat_priv->hna_local_changed, 0);
162
163         spin_unlock_bh(&bat_priv->hna_lhash_lock);
164         return count;
165 }
166
/* debugfs show function: dump all locally announced HNA addresses.
 * Two-pass pattern: first estimate the buffer size under the lock, then
 * format into a private buffer (still under the lock, hence GFP_ATOMIC),
 * and only hand it to seq_printf() after unlocking.
 */
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_local_hash;
	struct hna_local_entry *hna_local_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
			       "please specify interfaces to enable it\n",
			       net_dev->name);
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via HNA:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* 21 bytes per line: 3 + 17 (MAC) + newline */
		hlist_for_each(walk, head)
			buf_size += 21;
	}

	/* allocated under the spinlock, so GFP_ATOMIC is mandatory */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_lhash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_local_entry = bucket->data;

			/* bound 22 = 21 chars + NUL; matches the estimate
			 * above, so no truncation can occur */
			pos += snprintf(buff + pos, 22, " * %pM\n",
					hna_local_entry->addr);
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
226
227 static void _hna_local_del(void *data, void *arg)
228 {
229         struct bat_priv *bat_priv = (struct bat_priv *)arg;
230
231         kfree(data);
232         bat_priv->num_local_hna--;
233         atomic_set(&bat_priv->hna_local_changed, 1);
234 }
235
/* Unlink a local HNA entry from the hash and free it (via _hna_local_del,
 * which also updates the counters). Caller must hold hna_lhash_lock.
 */
static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
		    hna_local_entry->addr);
	_hna_local_del(hna_local_entry, bat_priv);
}
247
248 void hna_local_remove(struct bat_priv *bat_priv,
249                       uint8_t *addr, char *message)
250 {
251         struct hna_local_entry *hna_local_entry;
252
253         spin_lock_bh(&bat_priv->hna_lhash_lock);
254
255         hna_local_entry = (struct hna_local_entry *)
256                 hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
257                           addr);
258
259         if (hna_local_entry)
260                 hna_local_del(bat_priv, hna_local_entry, message);
261
262         spin_unlock_bh(&bat_priv->hna_lhash_lock);
263 }
264
265 static void hna_local_purge(struct work_struct *work)
266 {
267         struct delayed_work *delayed_work =
268                 container_of(work, struct delayed_work, work);
269         struct bat_priv *bat_priv =
270                 container_of(delayed_work, struct bat_priv, hna_work);
271         struct hashtable_t *hash = bat_priv->hna_local_hash;
272         struct hna_local_entry *hna_local_entry;
273         int i;
274         struct hlist_node *walk, *safe;
275         struct hlist_head *head;
276         struct element_t *bucket;
277         unsigned long timeout;
278
279         spin_lock_bh(&bat_priv->hna_lhash_lock);
280
281         for (i = 0; i < hash->size; i++) {
282                 head = &hash->table[i];
283
284                 hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
285                         hna_local_entry = bucket->data;
286
287                         timeout = hna_local_entry->last_seen;
288                         timeout += LOCAL_HNA_TIMEOUT * HZ;
289
290                         if ((!hna_local_entry->never_purge) &&
291                             time_after(jiffies, timeout))
292                                 hna_local_del(bat_priv, hna_local_entry,
293                                         "address timed out");
294                 }
295         }
296
297         spin_unlock_bh(&bat_priv->hna_lhash_lock);
298         hna_local_start_timer(bat_priv);
299 }
300
301 void hna_local_free(struct bat_priv *bat_priv)
302 {
303         if (!bat_priv->hna_local_hash)
304                 return;
305
306         cancel_delayed_work_sync(&bat_priv->hna_work);
307         hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
308         bat_priv->hna_local_hash = NULL;
309 }
310
311 int hna_global_init(struct bat_priv *bat_priv)
312 {
313         if (bat_priv->hna_global_hash)
314                 return 1;
315
316         bat_priv->hna_global_hash = hash_new(1024);
317
318         if (!bat_priv->hna_global_hash)
319                 return 0;
320
321         return 1;
322 }
323
/* Import the HNA list advertised by orig_node: create/update one global
 * entry per MAC in hna_buff, evict matching local entries, and keep a
 * private copy of the buffer on orig_node for later comparison/deletion.
 * NOTE(review): the ghash lock is dropped around the allocation and
 * re-taken before hash_add; a concurrent adder could insert the same
 * address in that window - confirm whether callers serialize this path.
 */
void hna_global_add_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct hna_global_entry *hna_global_entry;
	struct hna_local_entry *hna_local_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	/* walk the buffer one ETH_ALEN-sized address at a time */
	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
		spin_lock_bh(&bat_priv->hna_ghash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (!hna_global_entry) {
			/* drop the lock for the allocation, re-take it
			 * for the insertion (see NOTE above) */
			spin_unlock_bh(&bat_priv->hna_ghash_lock);

			hna_global_entry =
				kmalloc(sizeof(struct hna_global_entry),
					GFP_ATOMIC);

			/* OOM: give up on the remaining addresses, but
			 * still record the buffer copy below */
			if (!hna_global_entry)
				break;

			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES, bat_priv,
				"Creating new global hna entry: "
				"%pM (via %pM)\n",
				hna_global_entry->addr, orig_node->orig);

			spin_lock_bh(&bat_priv->hna_ghash_lock);
			hash_add(bat_priv->hna_global_hash, compare_orig,
				 choose_orig, hna_global_entry);

		}

		/* existing entries are re-pointed at the new originator */
		hna_global_entry->orig_node = orig_node;
		spin_unlock_bh(&bat_priv->hna_ghash_lock);

		/* remove address from local hash if present */
		spin_lock_bh(&bat_priv->hna_lhash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
		hna_local_entry = (struct hna_local_entry *)
			hash_find(bat_priv->hna_local_hash, compare_orig,
				  choose_orig, hna_ptr);

		if (hna_local_entry)
			hna_local_del(bat_priv, hna_local_entry,
				      "global hna received");

		spin_unlock_bh(&bat_priv->hna_lhash_lock);

		hna_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	orig_node->hna_buff = NULL;
	orig_node->hna_buff_len = 0;

	if (hna_buff_len > 0) {
		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
		if (orig_node->hna_buff) {
			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
			orig_node->hna_buff_len = hna_buff_len;
		}
	}
}
396
/* debugfs show function: dump all global HNA entries with the originator
 * each was learned from. Same two-pass estimate/format pattern as
 * hna_local_seq_print_text(), with the ghash lock held throughout.
 */
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *hna_global_entry;
	int i;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	size_t buf_size, pos;
	char *buff;

	if (!bat_priv->primary_if) {
		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "please specify interfaces to enable it\n",
				  net_dev->name);
	}

	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->hna_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* 43 bytes per line: 3 + 17 + 5 + 17 + newline */
		hlist_for_each(walk, head)
			buf_size += 43;
	}

	/* allocated under the spinlock, so GFP_ATOMIC is mandatory */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->hna_ghash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			hna_global_entry = bucket->data;

			/* bound 44 = 43 chars + NUL; matches the estimate
			 * above, so no truncation can occur */
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					hna_global_entry->addr,
					hna_global_entry->orig_node->orig);
		}
	}

	spin_unlock_bh(&bat_priv->hna_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
	return 0;
}
457
/* Unlink a global HNA entry from the hash and free it.
 * Caller must hold hna_ghash_lock.
 */
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}
471
472 void hna_global_del_orig(struct bat_priv *bat_priv,
473                          struct orig_node *orig_node, char *message)
474 {
475         struct hna_global_entry *hna_global_entry;
476         int hna_buff_count = 0;
477         unsigned char *hna_ptr;
478
479         if (orig_node->hna_buff_len == 0)
480                 return;
481
482         spin_lock_bh(&bat_priv->hna_ghash_lock);
483
484         while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
485                 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
486                 hna_global_entry = (struct hna_global_entry *)
487                         hash_find(bat_priv->hna_global_hash, compare_orig,
488                                   choose_orig, hna_ptr);
489
490                 if ((hna_global_entry) &&
491                     (hna_global_entry->orig_node == orig_node))
492                         _hna_global_del_orig(bat_priv, hna_global_entry,
493                                              message);
494
495                 hna_buff_count++;
496         }
497
498         spin_unlock_bh(&bat_priv->hna_ghash_lock);
499
500         orig_node->hna_buff_len = 0;
501         kfree(orig_node->hna_buff);
502         orig_node->hna_buff = NULL;
503 }
504
/* hash_delete() callback: free one global HNA entry; 'arg' is unused. */
static void hna_global_del(void *data, void *arg)
{
	kfree(data);
}
509
510 void hna_global_free(struct bat_priv *bat_priv)
511 {
512         if (!bat_priv->hna_global_hash)
513                 return;
514
515         hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
516         bat_priv->hna_global_hash = NULL;
517 }
518
519 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
520 {
521         struct hna_global_entry *hna_global_entry;
522
523         spin_lock_bh(&bat_priv->hna_ghash_lock);
524         hna_global_entry = (struct hna_global_entry *)
525                                 hash_find(bat_priv->hna_global_hash,
526                                           compare_orig, choose_orig, addr);
527         spin_unlock_bh(&bat_priv->hna_ghash_lock);
528
529         if (!hna_global_entry)
530                 return NULL;
531
532         return hna_global_entry->orig_node;
533 }