/* Copyright (C) 2009-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

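/* set up the originator hash and schedule the periodic originator purging */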
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

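/* release a neighbor node reference and free the node via kfree_rcu once
 * the last reference is gone
 */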
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

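/* allocate and initialise a neighbor node; the extra reference taken here
 * belongs to the returned pointer and has to be released by the caller
 */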
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr, uint32_t seqno)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new neighbor %pM, initial seqno %d\n",
		   neigh_addr, seqno);

out:
	return neigh_node;
}

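/* RCU callback: drop all neighbor references of this originator and free
 * its buffers once all readers are done
 */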
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_frag_list_free(&orig_node->frag_list);
	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

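/* release an originator reference; cleanup is deferred to an RCU callback */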
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

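/* stop the purge worker and tear down the complete originator hash */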
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	int size;
	int hash_added;
	unsigned long reset_time;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

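/* remove timed out or unusable neighbors of this originator, report the
 * best remaining neighbor and return true if anything was purged
 */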
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

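/* returns true if the originator timed out and has to be removed; otherwise
 * its neighbor list is purged and the route is updated if necessary
 */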
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}

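/* walk the whole originator hash and remove all originators and fragment
 * lists that have timed out
 */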
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					batadv_gw_node_delete(bat_priv,
							      orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			if (batadv_has_timed_out(orig_node->last_frag_packet,
						 BATADV_FRAG_TIMEOUT))
				batadv_frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

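/* seq_file handler: print the originator table of the given soft interface */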
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	unsigned long last_seen_jiffies;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
		   "Nexthop", "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			neigh_node = batadv_orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_jiffies = jiffies - orig_node->last_seen;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_secs = last_seen_msecs / 1000;
			last_seen_msecs = last_seen_msecs % 1000;

			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			batadv_neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}

static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own, old_size);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

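/* grow the bcast_own(_sum) buffers of all originators after a new hard
 * interface has been added
 */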
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

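/* shrink bcast_own(_sum) by removing the slots of the deleted interface */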
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
				   int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

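/* remove the counters of the deleted hard interface from all originators
 * and renumber the remaining interfaces
 */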
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}