/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        int peer_pid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        atomic_t        id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS        16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

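/* Hash a 16-bit queue number into one of the INSTANCE_BUCKETS chains:
 * fold the high byte onto the low byte, then reduce modulo the table
 * size. Lookups walk the chains under RCU; insertions and removals
 * are serialized by instances_lock. */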
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct nfqnl_instance *inst;

        head = &instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, pos, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

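/* Create and publish a new queue instance. The allocation uses
 * GFP_ATOMIC because it happens with instances_lock held; the module
 * reference taken here is dropped in instance_destroy_rcu() once the
 * instance has been torn down. */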
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&instances_lock);
        if (instance_lookup(queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_pid = pid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = 0xfffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

        spin_unlock(&instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

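/* Teardown is a two-step RCU sequence: __instance_destroy() unlinks
 * the instance so new lookups cannot find it, then call_rcu() defers
 * the actual flush and kfree() until all pre-existing RCU readers
 * (e.g. nfqnl_enqueue_packet()) have finished with the instance. */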
static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
        spin_lock(&instances_lock);
        __instance_destroy(inst);
        spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

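/* Linear scan of the pending-packet list under the queue lock; the
 * id searched for is the sequence number previously reported to
 * userspace in the packet message. */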
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue->queue_total--;
        }

        spin_unlock_bh(&queue->lock);

        return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

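/* Build the netlink message sent to userspace for a queued packet:
 * an nfgenmsg header followed by packet metadata attributes and, in
 * NFQNL_COPY_PACKET mode, up to copy_range bytes of payload. The
 * required size is computed up front so the skb never needs to be
 * reallocated. copy_mode and copy_range are read with ACCESS_ONCE
 * because a config message may change them concurrently. */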
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry)
{
        sk_buff_data_t old_tail;
        size_t size;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct nfqnl_msg_packet_hdr pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;

        size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        outdev = entry->outdev;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len == 0 || data_len > entskb->len)
                        data_len = entskb->len;

                size += nla_total_size(data_len);
                break;
        }

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg));
        nfmsg = NLMSG_DATA(nlh);
        nfmsg->nfgen_family = entry->pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        entry->id = atomic_inc_return(&queue->id_sequence);
        pmsg.packet_id          = htonl(entry->id);
        pmsg.hw_protocol        = entskb->protocol;
        pmsg.hook               = entry->hook;

        NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

        indev = entry->indev;
        if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                     htonl(indev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
                } else {
                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
                                     htonl(indev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physindev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
                                             htonl(entskb->nf_bridge->physindev->ifindex));
                }
#endif
        }

        if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
                NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
                if (entry->pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                     htonl(outdev->ifindex));
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
                } else {
                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
                                     htonl(outdev->ifindex));
                        if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
                                NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                             htonl(entskb->nf_bridge->physoutdev->ifindex));
                }
#endif
        }

        if (entskb->mark)
                NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

        if (indev && entskb->dev) {
                struct nfqnl_msg_packet_hw phw;
                int len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
        }

        if (data_len) {
                struct nlattr *nla;
                int sz = nla_attr_size(data_len);

                if (skb_tailroom(skb) < nla_total_size(data_len)) {
                        printk(KERN_WARNING "nf_queue: no tailroom!\n");
                        goto nlmsg_failure;
                }

                nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = sz;

                if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
                        BUG();
        }

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
nla_put_failure:
        if (skb)
                kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_ERR "nf_queue: error creating packet message\n");
        return NULL;
}

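/* Queue handler callback invoked by the nf_queue core under
 * rcu_read_lock(), so the lockless instance_lookup() is safe. The
 * unicast to userspace and the list insertion both happen with
 * queue->lock held, so a verdict from userspace cannot race with the
 * enqueue of the entry it refers to. */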
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        struct sk_buff *nskb;
        struct nfqnl_instance *queue;
        int err = -ENOBUFS;

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(queuenum);
        if (!queue) {
                err = -ESRCH;
                goto err_out;
        }

        if (queue->copy_mode == NFQNL_COPY_NONE) {
                err = -EINVAL;
                goto err_out;
        }

        nskb = nfqnl_build_packet_message(queue, entry);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (!queue->peer_pid) {
                err = -EINVAL;
                goto err_out_free_nskb;
        }
        if (queue->queue_total >= queue->queue_maxlen) {
                queue->queue_dropped++;
                if (net_ratelimit())
                        printk(KERN_WARNING "nf_queue: full at %d entries, "
                               "dropping packet(s).\n",
                               queue->queue_total);
                goto err_out_free_nskb;
        }

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
err_out:
        return err;
}

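/* Replace the payload of a queued packet with data supplied by
 * userspace alongside the verdict, trimming or growing the skb as
 * needed; the checksum is invalidated since the contents changed. */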
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
        struct sk_buff *nskb;
        int diff;

        diff = data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

476
477 static int
478 nfqnl_set_mode(struct nfqnl_instance *queue,
479                unsigned char mode, unsigned int range)
480 {
481         int status = 0;
482
483         spin_lock_bh(&queue->lock);
484         switch (mode) {
485         case NFQNL_COPY_NONE:
486         case NFQNL_COPY_META:
487                 queue->copy_mode = mode;
488                 queue->copy_range = 0;
489                 break;
490
491         case NFQNL_COPY_PACKET:
492                 queue->copy_mode = mode;
493                 /* we're using struct nlattr which has 16bit nla_len */
494                 if (range > 0xffff)
495                         queue->copy_range = 0xffff;
496                 else
497                         queue->copy_range = range;
498                 break;
499
500         default:
501                 status = -EINVAL;
502
503         }
504         spin_unlock_bh(&queue->lock);
505
506         return status;
507 }
508
509 static int
510 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
511 {
512         if (entry->indev)
513                 if (entry->indev->ifindex == ifindex)
514                         return 1;
515         if (entry->outdev)
516                 if (entry->outdev->ifindex == ifindex)
517                         return 1;
518 #ifdef CONFIG_BRIDGE_NETFILTER
519         if (entry->skb->nf_bridge) {
520                 if (entry->skb->nf_bridge->physindev &&
521                     entry->skb->nf_bridge->physindev->ifindex == ifindex)
522                         return 1;
523                 if (entry->skb->nf_bridge->physoutdev &&
524                     entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
525                         return 1;
526         }
527 #endif
528         return 0;
529 }
530
531 /* drop all packets with either indev or outdev == ifindex from all queue
532  * instances */
533 static void
534 nfqnl_dev_drop(int ifindex)
535 {
536         int i;
537
538         rcu_read_lock();
539
540         for (i = 0; i < INSTANCE_BUCKETS; i++) {
541                 struct hlist_node *tmp;
542                 struct nfqnl_instance *inst;
543                 struct hlist_head *head = &instance_table[i];
544
545                 hlist_for_each_entry_rcu(inst, tmp, head, hlist)
546                         nfqnl_flush(inst, dev_cmp, ifindex);
547         }
548
549         rcu_read_unlock();
550 }
551
552 #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
553
554 static int
555 nfqnl_rcv_dev_event(struct notifier_block *this,
556                     unsigned long event, void *ptr)
557 {
558         struct net_device *dev = ptr;
559
560         if (!net_eq(dev_net(dev), &init_net))
561                 return NOTIFY_DONE;
562
563         /* Drop any packets associated with the downed device */
564         if (event == NETDEV_DOWN)
565                 nfqnl_dev_drop(dev->ifindex);
566         return NOTIFY_DONE;
567 }
568
569 static struct notifier_block nfqnl_dev_notifier = {
570         .notifier_call  = nfqnl_rcv_dev_event,
571 };
572
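/* A NETLINK_URELEASE notification means a netlink socket went away;
 * if it was a NETLINK_NETFILTER socket, destroy every queue instance
 * bound to that pid so packets no longer pile up for a dead
 * listener. */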
static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this pid */
                spin_lock(&instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *tmp, *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &instance_table[i];

                        hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
                                if ((n->net == &init_net) &&
                                    (n->pid == inst->peer_pid))
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
};

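/* Handle an NFQNL_MSG_VERDICT from userspace: validate the sender
 * against the instance owner, dequeue the referenced packet, apply an
 * optional payload replacement and mark, and hand the packet back to
 * the stack via nf_reinject(). */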
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;

        queue = instance_lookup(queue_num);
        if (!queue)
                return -ENODEV;

        if (queue->peer_pid != NETLINK_CB(skb).pid)
                return -EPERM;

        if (!nfqa[NFQA_VERDICT_HDR])
                return -EINVAL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict);

        if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
                return -EINVAL;

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        if (nfqa[NFQA_PAYLOAD]) {
                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
                        verdict = NF_DROP;
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .name   = "nf_queue",
        .outfn  = &nfqnl_enqueue_packet,
};

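/* Handle an NFQNL_MSG_CONFIG request. PF_(UN)BIND is processed first
 * and without any queue context, because (un)registering a queue
 * handler may sleep; the remaining commands operate on a specific
 * instance and run under rcu_read_lock(). */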
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Commands without queue context - might sleep */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND:
                        return nf_register_queue_handler(ntohs(cmd->pf),
                                                         &nfqh);
                case NFQNL_CFG_CMD_PF_UNBIND:
                        return nf_unregister_queue_handler(ntohs(cmd->pf),
                                                           &nfqh);
                }
        }

        rcu_read_lock();
        queue = instance_lookup(queue_num);
        if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(queue_num, NETLINK_CB(skb).pid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

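/* Verdicts arrive at packet rates, so they are dispatched through
 * .call_rcu and handled under rcu_read_lock() by the nfnetlink core,
 * without taking the nfnl mutex; config messages keep the ordinary
 * .call path since queue-handler (un)registration may sleep. */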
static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;

        if (!st)
                return NULL;

        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&instance_table[st->bucket]))
                        return instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;

        h = h->next;
        while (!h) {
                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                h = instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(instances_lock)
{
        spin_lock(&instances_lock);
        return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(instances_lock)
{
        spin_unlock(&instances_lock);
}

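/* One line per instance: queue number, peer pid, packets queued,
 * copy mode, copy range, queue-full drops, userspace drops, last
 * packet id, and a constant 1 (apparently kept for compatibility
 * with the old ip_queue proc format). */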
static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
                          inst->queue_num,
                          inst->peer_pid, inst->queue_total,
                          inst->copy_mode, inst->copy_range,
                          inst->queue_dropped, inst->queue_user_dropped,
                          atomic_read(&inst->id_sequence), 1);
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
        int i, status = -ENOMEM;

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&instance_table[i]);

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         proc_net_netfilter, &nfqnl_file_ops)) {
                status = -ENOMEM;
                goto cleanup_subsys;
        }
#endif

        register_netdevice_notifier(&nfqnl_dev_notifier);
        return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
        nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);