batman-adv: separate ethernet comparing calls from hash functions
net/batman-adv/translation-table.c
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hash.h"
#include "originator.h"

static void hna_local_purge(struct work_struct *work);
static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message);

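/* (re)schedule the periodic purge of the local translation table
   (runs every 10 seconds) */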
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
        queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}

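/* allocate the local translation hash (1024 buckets) and start the purge
   timer; returns 1 on success, 0 if the hash could not be allocated */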
int hna_local_init(struct bat_priv *bat_priv)
{
        if (bat_priv->hna_local_hash)
                return 1;

        bat_priv->hna_local_hash = hash_new(1024);

        if (!bat_priv->hna_local_hash)
                return 0;

        atomic_set(&bat_priv->hna_local_changed, 0);
        hna_local_start_timer(bat_priv);

        return 1;
}

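/* announce addr as a host reachable through this node: refresh the
   timestamp if the entry already exists, otherwise create a new local
   entry (as long as the announcement still fits into a batman packet)
   and drop a matching entry from the global table */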
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct hna_local_entry *hna_local_entry;
        struct hna_global_entry *hna_global_entry;
        int required_bytes;

        spin_lock_bh(&bat_priv->hna_lhash_lock);
        rcu_read_lock();
        hna_local_entry =
                ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
                                                     compare_orig, choose_orig,
                                                     addr));
        rcu_read_unlock();
        spin_unlock_bh(&bat_priv->hna_lhash_lock);

        if (hna_local_entry) {
                hna_local_entry->last_seen = jiffies;
                return;
        }

        /* only announce as many hosts as fit into the batman-packet and
           into batman_packet->num_hna; this also puts a limit on
           MAC flooding */
        required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
        required_bytes += BAT_PACKET_LEN;

        if ((required_bytes > ETH_DATA_LEN) ||
            (atomic_read(&bat_priv->aggregated_ogms) &&
             required_bytes > MAX_AGGREGATION_BYTES) ||
            (bat_priv->num_local_hna + 1 > 255)) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Can't add new local hna entry (%pM): "
                        "number of local hna entries exceeds packet size\n",
                        addr);
                return;
        }

        bat_dbg(DBG_ROUTES, bat_priv,
                "Creating new local hna entry: %pM\n", addr);

        hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
        if (!hna_local_entry)
                return;

        memcpy(hna_local_entry->addr, addr, ETH_ALEN);
        hna_local_entry->last_seen = jiffies;

        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
                hna_local_entry->never_purge = 1;
        else
                hna_local_entry->never_purge = 0;

        spin_lock_bh(&bat_priv->hna_lhash_lock);

        hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
                 hna_local_entry);
        bat_priv->num_local_hna++;
        atomic_set(&bat_priv->hna_local_changed, 1);

        spin_unlock_bh(&bat_priv->hna_lhash_lock);

        /* remove address from global hash if present */
        spin_lock_bh(&bat_priv->hna_ghash_lock);

        rcu_read_lock();
        hna_global_entry = ((struct hna_global_entry *)
                                hash_find(bat_priv->hna_global_hash,
                                          compare_orig, choose_orig, addr));
        rcu_read_unlock();

        if (hna_global_entry)
                _hna_global_del_orig(bat_priv, hna_global_entry,
                                     "local hna received");

        spin_unlock_bh(&bat_priv->hna_ghash_lock);
}

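/* copy as many local hna addresses into buff as buff_len allows; returns
   the number of addresses written and clears the 'changed' flag once all
   local entries could be exported */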
int hna_local_fill_buffer(struct bat_priv *bat_priv,
                          unsigned char *buff, int buff_len)
{
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
        struct element_t *bucket;
        int i;
        struct hlist_node *walk;
        struct hlist_head *head;
        int count = 0;

        spin_lock_bh(&bat_priv->hna_lhash_lock);

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {

                        if (buff_len < (count + 1) * ETH_ALEN)
                                break;

                        hna_local_entry = bucket->data;
                        memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
                               ETH_ALEN);

                        count++;
                }
        }

        /* if we did not get all new local hnas see you next time  ;-) */
        if (count == bat_priv->num_local_hna)
                atomic_set(&bat_priv->hna_local_changed, 0);

        spin_unlock_bh(&bat_priv->hna_lhash_lock);
        return count;
}

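/* seq_file handler: list all addresses announced via hna by this soft
   interface */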
int hna_local_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
        int i;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                               "please specify interfaces to enable it\n",
                               net_dev->name);
        }

        seq_printf(seq, "Locally retrieved addresses (from %s) "
                   "announced via HNA:\n",
                   net_dev->name);

        spin_lock_bh(&bat_priv->hna_lhash_lock);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each(walk, head)
                        buf_size += 21;
        }

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_bh(&bat_priv->hna_lhash_lock);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        hna_local_entry = bucket->data;

                        pos += snprintf(buff + pos, 22, " * %pM\n",
                                        hna_local_entry->addr);
                }
        }

        spin_unlock_bh(&bat_priv->hna_lhash_lock);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

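/* free a local entry and update the entry counter and the 'changed' flag;
   also used as callback for hash_delete() */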
static void _hna_local_del(void *data, void *arg)
{
        struct bat_priv *bat_priv = (struct bat_priv *)arg;

        kfree(data);
        bat_priv->num_local_hna--;
        atomic_set(&bat_priv->hna_local_changed, 1);
}

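/* remove a single entry from the local translation hash and free it */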
static void hna_local_del(struct bat_priv *bat_priv,
                          struct hna_local_entry *hna_local_entry,
                          char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
                hna_local_entry->addr, message);

        hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
                    hna_local_entry->addr);
        _hna_local_del(hna_local_entry, bat_priv);
}

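/* delete the local hna entry matching addr, if one exists */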
void hna_local_remove(struct bat_priv *bat_priv,
                      uint8_t *addr, char *message)
{
        struct hna_local_entry *hna_local_entry;

        spin_lock_bh(&bat_priv->hna_lhash_lock);

        rcu_read_lock();
        hna_local_entry = (struct hna_local_entry *)
                hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
                          addr);
        rcu_read_unlock();

        if (hna_local_entry)
                hna_local_del(bat_priv, hna_local_entry, message);

        spin_unlock_bh(&bat_priv->hna_lhash_lock);
}

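/* periodic worker: remove local entries that have not been seen for
   LOCAL_HNA_TIMEOUT seconds (entries marked never_purge are kept),
   then rearm the timer */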
static void hna_local_purge(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, hna_work);
        struct hashtable_t *hash = bat_priv->hna_local_hash;
        struct hna_local_entry *hna_local_entry;
        int i;
        struct hlist_node *walk, *safe;
        struct hlist_head *head;
        struct element_t *bucket;
        unsigned long timeout;

        spin_lock_bh(&bat_priv->hna_lhash_lock);

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
                        hna_local_entry = bucket->data;

                        timeout = hna_local_entry->last_seen;
                        timeout += LOCAL_HNA_TIMEOUT * HZ;

                        if ((!hna_local_entry->never_purge) &&
                            time_after(jiffies, timeout))
                                hna_local_del(bat_priv, hna_local_entry,
                                        "address timed out");
                }
        }

        spin_unlock_bh(&bat_priv->hna_lhash_lock);
        hna_local_start_timer(bat_priv);
}

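/* tear down the local translation table: stop the purge worker and free
   all remaining entries */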
void hna_local_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->hna_local_hash)
                return;

        cancel_delayed_work_sync(&bat_priv->hna_work);
        hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
        bat_priv->hna_local_hash = NULL;
}

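/* allocate the global translation hash (1024 buckets) */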
int hna_global_init(struct bat_priv *bat_priv)
{
        if (bat_priv->hna_global_hash)
                return 1;

        bat_priv->hna_global_hash = hash_new(1024);

        if (!bat_priv->hna_global_hash)
                return 0;

        return 1;
}

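/* parse the hna buffer announced by orig_node: create global entries for
   unknown addresses, point existing entries at this originator, drop
   addresses that are now claimed remotely from the local table and keep
   a copy of the raw buffer in orig_node */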
void hna_global_add_orig(struct bat_priv *bat_priv,
                         struct orig_node *orig_node,
                         unsigned char *hna_buff, int hna_buff_len)
{
        struct hna_global_entry *hna_global_entry;
        struct hna_local_entry *hna_local_entry;
        int hna_buff_count = 0;
        unsigned char *hna_ptr;

        while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
                spin_lock_bh(&bat_priv->hna_ghash_lock);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                rcu_read_lock();
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(bat_priv->hna_global_hash, compare_orig,
                                  choose_orig, hna_ptr);
                rcu_read_unlock();

                if (!hna_global_entry) {
                        spin_unlock_bh(&bat_priv->hna_ghash_lock);

                        hna_global_entry =
                                kmalloc(sizeof(struct hna_global_entry),
                                        GFP_ATOMIC);

                        if (!hna_global_entry)
                                break;

                        memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

                        bat_dbg(DBG_ROUTES, bat_priv,
                                "Creating new global hna entry: "
                                "%pM (via %pM)\n",
                                hna_global_entry->addr, orig_node->orig);

                        spin_lock_bh(&bat_priv->hna_ghash_lock);
                        hash_add(bat_priv->hna_global_hash, compare_orig,
                                 choose_orig, hna_global_entry);

                }

                hna_global_entry->orig_node = orig_node;
                spin_unlock_bh(&bat_priv->hna_ghash_lock);

                /* remove address from local hash if present */
                spin_lock_bh(&bat_priv->hna_lhash_lock);

                hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
                rcu_read_lock();
                hna_local_entry = (struct hna_local_entry *)
                        hash_find(bat_priv->hna_local_hash, compare_orig,
                                  choose_orig, hna_ptr);
                rcu_read_unlock();

                if (hna_local_entry)
                        hna_local_del(bat_priv, hna_local_entry,
                                      "global hna received");

                spin_unlock_bh(&bat_priv->hna_lhash_lock);

                hna_buff_count++;
        }

        /* initialize, and overwrite if malloc succeeds */
        orig_node->hna_buff = NULL;
        orig_node->hna_buff_len = 0;

        if (hna_buff_len > 0) {
                orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
                if (orig_node->hna_buff) {
                        memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
                        orig_node->hna_buff_len = hna_buff_len;
                }
        }
}

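/* seq_file handler: list all global hna entries together with the
   originator that announced them */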
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->hna_global_hash;
        struct hna_global_entry *hna_global_entry;
        int i;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        size_t buf_size, pos;
        char *buff;

        if (!bat_priv->primary_if) {
                return seq_printf(seq, "BATMAN mesh %s disabled - "
                                  "please specify interfaces to enable it\n",
                                  net_dev->name);
        }

        seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
                   net_dev->name);

        spin_lock_bh(&bat_priv->hna_ghash_lock);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each(walk, head)
                        buf_size += 43;
        }

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_bh(&bat_priv->hna_ghash_lock);
                return -ENOMEM;
        }
        buff[0] = '\0';
        pos = 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        hna_global_entry = bucket->data;

                        pos += snprintf(buff + pos, 44,
                                        " * %pM via %pM\n",
                                        hna_global_entry->addr,
                                        hna_global_entry->orig_node->orig);
                }
        }

        spin_unlock_bh(&bat_priv->hna_ghash_lock);

        seq_printf(seq, "%s", buff);
        kfree(buff);
        return 0;
}

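/* remove a single entry from the global translation hash and free it;
   called with hna_ghash_lock held */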
static void _hna_global_del_orig(struct bat_priv *bat_priv,
                                 struct hna_global_entry *hna_global_entry,
                                 char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv,
                "Deleting global hna entry %pM (via %pM): %s\n",
                hna_global_entry->addr, hna_global_entry->orig_node->orig,
                message);

        hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
                    hna_global_entry->addr);
        kfree(hna_global_entry);
}

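/* remove all global hna entries announced by orig_node and release the
   hna buffer stored for it */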
void hna_global_del_orig(struct bat_priv *bat_priv,
                         struct orig_node *orig_node, char *message)
{
        struct hna_global_entry *hna_global_entry;
        int hna_buff_count = 0;
        unsigned char *hna_ptr;

        if (orig_node->hna_buff_len == 0)
                return;

        spin_lock_bh(&bat_priv->hna_ghash_lock);

        while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
                hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
                rcu_read_lock();
                hna_global_entry = (struct hna_global_entry *)
                        hash_find(bat_priv->hna_global_hash, compare_orig,
                                  choose_orig, hna_ptr);
                rcu_read_unlock();

                if ((hna_global_entry) &&
                    (hna_global_entry->orig_node == orig_node))
                        _hna_global_del_orig(bat_priv, hna_global_entry,
                                             message);

                hna_buff_count++;
        }

        spin_unlock_bh(&bat_priv->hna_ghash_lock);

        orig_node->hna_buff_len = 0;
        kfree(orig_node->hna_buff);
        orig_node->hna_buff = NULL;
}

static void hna_global_del(void *data, void *arg)
{
        kfree(data);
}

void hna_global_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->hna_global_hash)
                return;

        hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
        bat_priv->hna_global_hash = NULL;
}

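/* find the originator that announced the given client address; returns
   NULL if the address is not known in the global table */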
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
        struct hna_global_entry *hna_global_entry;

        spin_lock_bh(&bat_priv->hna_ghash_lock);
        rcu_read_lock();
        hna_global_entry = (struct hna_global_entry *)
                                hash_find(bat_priv->hna_global_hash,
                                          compare_orig, choose_orig, addr);
        rcu_read_unlock();
        spin_unlock_bh(&bat_priv->hna_ghash_lock);

        if (!hna_global_entry)
                return NULL;

        return hna_global_entry->orig_node;
}