#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long-term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
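/*
 * Writers serialize on queue_handler_mutex; readers run locklessly under
 * RCU, hence the __rcu annotation below and the rcu_*() accessors used
 * throughout this file.
 */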
static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* Return -EBUSY when somebody else is registered, -EEXIST if the
 * same handler is already registered, and 0 on success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
        int ret;
        const struct nf_queue_handler *old;

        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;

        mutex_lock(&queue_handler_mutex);
        old = rcu_dereference_protected(queue_handler[pf],
                                        lockdep_is_held(&queue_handler_mutex));
        if (old == qh)
                ret = -EEXIST;
        else if (old)
                ret = -EBUSY;
        else {
                rcu_assign_pointer(queue_handler[pf], qh);
                ret = 0;
        }
        mutex_unlock(&queue_handler_mutex);

        return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
        const struct nf_queue_handler *old;

        if (pf >= ARRAY_SIZE(queue_handler))
                return -EINVAL;

        mutex_lock(&queue_handler_mutex);
        old = rcu_dereference_protected(queue_handler[pf],
                                        lockdep_is_held(&queue_handler_mutex));
        if (old && old != qh) {
                mutex_unlock(&queue_handler_mutex);
                return -EINVAL;
        }

        rcu_assign_pointer(queue_handler[pf], NULL);
        mutex_unlock(&queue_handler_mutex);

        synchronize_rcu();

        return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

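/* Drop every per-family registration pointing at @qh; used when a
 * queue-handler module such as nfnetlink_queue unloads. */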
void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
        u_int8_t pf;

        mutex_lock(&queue_handler_mutex);
        for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
                if (rcu_dereference_protected(
                                queue_handler[pf],
                                lockdep_is_held(&queue_handler_mutex)
                                ) == qh)
                        rcu_assign_pointer(queue_handler[pf], NULL);
        }
        mutex_unlock(&queue_handler_mutex);

        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

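/* Undo the device and module references taken by __nf_queue() below. */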
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        /* Release those devices we held, or Alexey will kill me. */
        if (entry->indev)
                dev_put(entry->indev);
        if (entry->outdev)
                dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

                if (nf_bridge->physindev)
                        dev_put(nf_bridge->physindev);
                if (nf_bridge->physoutdev)
                        dev_put(nf_bridge->physoutdev);
        }
#endif
        /* Drop reference to owner of hook which queued us. */
        module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
                      struct list_head *elem,
                      u_int8_t pf, unsigned int hook,
                      struct net_device *indev,
                      struct net_device *outdev,
                      int (*okfn)(struct sk_buff *),
                      unsigned int queuenum)
{
        int status;
        struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev;
        struct net_device *physoutdev;
#endif
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        rcu_read_lock();

        qh = rcu_dereference(queue_handler[pf]);
        if (!qh)
                goto err_unlock;

        afinfo = nf_get_afinfo(pf);
        if (!afinfo)
                goto err_unlock;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry)
                goto err_unlock;

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = list_entry(elem, struct nf_hook_ops, list),
                .pf     = pf,
                .hook   = hook,
                .indev  = indev,
                .outdev = outdev,
                .okfn   = okfn,
        };

        /* If it's going away, ignore hook. */
        if (!try_module_get(entry->elem->owner)) {
                rcu_read_unlock();
                kfree(entry);
                return 0;
        }

        /* Bump dev refs so they don't vanish while packet is out */
        if (indev)
                dev_hold(indev);
        if (outdev)
                dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                physindev = skb->nf_bridge->physindev;
                if (physindev)
                        dev_hold(physindev);
                physoutdev = skb->nf_bridge->physoutdev;
                if (physoutdev)
                        dev_hold(physoutdev);
        }
#endif
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
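        /* Hand the entry to the registered handler; a negative return means
         * it could not be queued and the packet will be dropped. */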
        status = qh->outfn(entry, queuenum);

        rcu_read_unlock();

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 1;

err_unlock:
        rcu_read_unlock();
err:
        kfree_skb(skb);
        kfree(entry);
        return 1;
}

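/*
 * Entry point from the netfilter core.  GSO packets are segmented before
 * queueing so that the (typically userspace) queue handler only ever sees
 * MTU-sized packets.
 */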
int nf_queue(struct sk_buff *skb,
             struct list_head *elem,
             u_int8_t pf, unsigned int hook,
             struct net_device *indev,
             struct net_device *outdev,
             int (*okfn)(struct sk_buff *),
             unsigned int queuenum)
{
        struct sk_buff *segs;

        if (!skb_is_gso(skb))
                return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                                  queuenum);

        switch (pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case NFPROTO_IPV6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        }

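        /* Queue each segment separately; a segment is freed here only when
         * __nf_queue() refused it because the hook is going away. */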
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
        if (IS_ERR(segs))
                return 1;

        do {
                struct sk_buff *nskb = segs->next;

                segs->next = NULL;
                if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
                                queuenum))
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);
        return 1;
}

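/* Called by the queue handler to push a packet back into the stack once a
 * verdict has been reached for it. */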
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct sk_buff *skb = entry->skb;
        struct list_head *elem = &entry->elem->list;
        const struct nf_afinfo *afinfo;

        rcu_read_lock();

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok.  NF_REPEAT means the
         * hook that queued us should run again, so step back one element
         * before iterating. */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->pf);
                if (!afinfo || afinfo->reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
                                     skb, entry->hook,
                                     entry->indev, entry->outdev, &elem,
                                     entry->okfn, INT_MIN);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->okfn(skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
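                /* __nf_queue() returns 0 only when the target hook is
                 * disappearing; resume traversal at the next hook. */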
                if (!__nf_queue(skb, elem, entry->pf, entry->hook,
                                entry->indev, entry->outdev, entry->okfn,
                                verdict >> NF_VERDICT_BITS))
                        goto next_hook;
                break;
        case NF_STOLEN:
        default:
                kfree_skb(skb);
        }
        rcu_read_unlock();
        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);

#ifdef CONFIG_PROC_FS
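/* /proc/net/netfilter/nf_queue: one line per protocol family, giving the
 * name of the registered queue handler or "NONE". */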
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos >= ARRAY_SIZE(queue_handler))
                return NULL;

        return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;

        if (*pos >= ARRAY_SIZE(queue_handler))
                return NULL;

        return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
        int ret;
        loff_t *pos = v;
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(queue_handler[*pos]);
        if (!qh)
                ret = seq_printf(s, "%2lld NONE\n", *pos);
        else
                ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
        rcu_read_unlock();

        return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqueue_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif /* PROC_FS */
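/* Called from the netfilter core at init time; its only job is to create
 * the proc entry. */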
int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
        if (!proc_create("nf_queue", S_IRUGO,
                         proc_net_netfilter, &nfqueue_file_ops))
                return -1;
#endif
        return 0;
}