5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired on the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
/* Debug helper: report a suspected ebtables bug to the kernel log.
 * NOTE(review): uses raw printk, so the pr_fmt prefix defined at the
 * top of the file does not apply to these messages.
 */
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * For reading or updating the counters, the user context needs to
44 /* The size of each set of counters is altered to get cache alignment */
/* Round x up to a multiple of SMP_CACHE_BYTES so each cpu's counter
 * set starts on its own cache line.
 */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Byte size of one cpu's set of n counters, cache aligned.
 * (n) is parenthesized so expression arguments expand correctly.
 */
46 #define COUNTER_OFFSET(n) (SMP_ALIGN((n) * sizeof(struct ebt_counter)))
/* Address of the given cpu's private copy of the n counters that
 * start at c; arguments parenthesized against precedence surprises.
 */
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)(c)) + \
48 COUNTER_OFFSET(n) * (cpu)))
52 static DEFINE_MUTEX(ebt_mutex);
/* compat: convert a 32-bit standard-target verdict from userspace to
 * the native layout, applying the compat jump delta.
 * NOTE(review): the guard that limits the adjustment (likely v >= 0)
 * is not visible in this chunk -- confirm against the full source.
 */
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
/* compat: convert a native standard-target verdict to the 32-bit
 * userspace layout, removing the compat jump delta.
 * Returns 0 on success, -EFAULT if the user copy fails.
 * NOTE(review): the guard limiting the adjustment is not visible in
 * this chunk -- confirm against the full source.
 */
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* built-in standard target carrying an int verdict
 * (ACCEPT/DROP/CONTINUE/RETURN or a chain jump offset)
 */
75 static struct xt_target ebt_standard_target = {
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
/* Run one watcher extension on the skb. Watchers only observe the
 * packet; their target() return value is deliberately ignored.
 */
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
/* Run one match extension; returns EBT_MATCH on a hit,
 * EBT_NOMATCH otherwise.
 */
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
/* Compare a rule's interface name against a device name.
 * Returns 0 (false) when they match; byte value 1 in the rule name
 * acts as a wildcard terminator (prefix match).
 * NOTE(review): the early-exit for an empty entry and the loop body
 * incrementing i are not visible in this chunk.
 */
108 ebt_dev_check(const char *entry, const struct net_device *device)
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 return (devname[i] != entry[i] && entry[i] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
/* Check the built-in rule fields (protocol, in/out devices, logical
 * bridge devices, source/dest MAC with mask) against the skb.
 * FWINV2 applies each field's invert flag. The non-zero "no match"
 * returns are on lines not visible in this chunk.
 */
127 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128 const struct net_device *in, const struct net_device *out)
130 const struct ethhdr *h = eth_hdr(skb);
/* a VLAN-tagged frame matches as ETH_P_8021Q */
134 if (vlan_tx_tag_present(skb))
135 ethproto = htons(ETH_P_8021Q);
137 ethproto = h->h_proto;
139 if (e->bitmask & EBT_802_3) {
/* 802.3 frames have a length field < 1536; >= 1536 is EtherType */
140 if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
142 } else if (!(e->bitmask & EBT_NOPROTO) &&
143 FWINV2(e->ethproto != ethproto, EBT_IPROTO))
146 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
148 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
150 /* rcu_read_lock()ed by nf_hook_slow */
151 if (in && br_port_exists(in) &&
152 FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
155 if (out && br_port_exists(out) &&
156 FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
/* masked MAC compare: any differing unmasked bit makes verdict != 0 */
160 if (e->bitmask & EBT_SOURCEMAC) {
162 for (i = 0; i < 6; i++)
163 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
165 if (FWINV2(verdict != 0, EBT_ISOURCE) )
168 if (e->bitmask & EBT_DESTMAC) {
170 for (i = 0; i < 6; i++)
171 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
173 if (FWINV2(verdict != 0, EBT_IDEST) )
/* advance to the next rule using the entry's stored byte offset */
180 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
182 return (void *)entry + entry->next_offset;
185 /* Do some firewalling */
/* Main packet walk: traverse the chain for the given hook under the
 * table's read lock, running matches, watchers and targets per rule.
 * Verdicts >= 0 are jump offsets into private->entries; the chain
 * stack cs tracks nested user-defined-chain calls for EBT_RETURN.
 * Returns an NF_* verdict (return statements are on lines not
 * visible in this chunk).
 */
186 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
187 const struct net_device *in, const struct net_device *out,
188 struct ebt_table *table)
191 struct ebt_entry *point;
192 struct ebt_counter *counter_base, *cb_base;
193 const struct ebt_entry_target *t;
195 struct ebt_chainstack *cs;
196 struct ebt_entries *chaininfo;
198 const struct ebt_table_info *private;
199 struct xt_action_param acpar;
201 acpar.family = NFPROTO_BRIDGE;
204 acpar.hotdrop = false;
205 acpar.hooknum = hook;
207 read_lock_bh(&table->lock);
208 private = table->private;
/* this cpu's private slice of the counter array */
209 cb_base = COUNTER_BASE(private->counters, private->nentries,
211 if (private->chainstack)
212 cs = private->chainstack[smp_processor_id()];
215 chaininfo = private->hook_entry[hook];
216 nentries = private->hook_entry[hook]->nentries;
217 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
218 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
219 /* base for chain jumps */
220 base = private->entries;
222 while (i < nentries) {
223 if (ebt_basic_match(point, skb, in, out))
226 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
229 read_unlock_bh(&table->lock);
233 /* increase counter */
234 (*(counter_base + i)).pcnt++;
235 (*(counter_base + i)).bcnt += skb->len;
237 /* these should only watch: not modify, nor tell us
238 what to do with the packet */
239 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
241 t = (struct ebt_entry_target *)
242 (((char *)point) + point->target_offset);
243 /* standard target */
/* a NULL target() hook marks the built-in standard target */
244 if (!t->u.target->target)
245 verdict = ((struct ebt_standard_target *)t)->verdict;
247 acpar.target = t->u.target;
248 acpar.targinfo = t->data;
249 verdict = t->u.target->target(skb, &acpar);
251 if (verdict == EBT_ACCEPT) {
252 read_unlock_bh(&table->lock);
255 if (verdict == EBT_DROP) {
256 read_unlock_bh(&table->lock);
/* EBT_RETURN: pop one frame off the chain stack and resume */
259 if (verdict == EBT_RETURN) {
261 #ifdef CONFIG_NETFILTER_DEBUG
263 BUGPRINT("RETURN on base chain");
264 /* act like this is EBT_CONTINUE */
269 /* put all the local variables right */
271 chaininfo = cs[sp].chaininfo;
272 nentries = chaininfo->nentries;
274 counter_base = cb_base +
275 chaininfo->counter_offset;
278 if (verdict == EBT_CONTINUE)
280 #ifdef CONFIG_NETFILTER_DEBUG
282 BUGPRINT("bogus standard verdict\n");
283 read_unlock_bh(&table->lock);
/* jump verdict: push current position and enter the target chain */
289 cs[sp].chaininfo = chaininfo;
290 cs[sp].e = ebt_next_entry(point);
292 chaininfo = (struct ebt_entries *) (base + verdict);
293 #ifdef CONFIG_NETFILTER_DEBUG
294 if (chaininfo->distinguisher) {
295 BUGPRINT("jump to non-chain\n");
296 read_unlock_bh(&table->lock);
300 nentries = chaininfo->nentries;
301 point = (struct ebt_entry *)chaininfo->data;
302 counter_base = cb_base + chaininfo->counter_offset;
306 point = ebt_next_entry(point);
/* end of chain reached: apply the chain policy */
310 /* I actually like this :) */
311 if (chaininfo->policy == EBT_RETURN)
313 if (chaininfo->policy == EBT_ACCEPT) {
314 read_unlock_bh(&table->lock);
317 read_unlock_bh(&table->lock);
321 /* If it succeeds, returns element and locks mutex */
/* Search a list of name-keyed entries (tables) for "name" while
 * holding the mutex; on success the mutex stays locked for the
 * caller. *error reports mutex_lock_interruptible failure.
 * The anonymous struct below models the common {list, name} header
 * shared by the searched entries.
 */
323 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
327 struct list_head list;
328 char name[EBT_FUNCTION_MAXNAMELEN];
331 *error = mutex_lock_interruptible(mutex);
335 list_for_each_entry(e, head, list) {
336 if (strcmp(e->name, name) == 0)
/* As find_inlist_lock_noload(), but on a miss request the module
 * "<prefix><name>" and retry once.
 */
345 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
346 int *error, struct mutex *mutex)
348 return try_then_request_module(
349 find_inlist_lock_noload(head, name, error, mutex),
350 "%s%s", prefix, name);
/* Look up an ebtables table by name in this netns, auto-loading the
 * "ebtable_<name>" module if needed; returns with ebt_mutex held on
 * success.
 */
353 static inline struct ebt_table *
354 find_table_lock(struct net *net, const char *name, int *error,
357 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
358 "ebtable_", error, mutex);
/* Validate one match in an entry: size must fit before the watchers
 * section, the extension module must be loadable, and xt_check_match
 * must approve the per-match data. On xt_check_match failure the
 * module reference is dropped.
 */
362 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
365 const struct ebt_entry *e = par->entryinfo;
366 struct xt_match *match;
367 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
/* bounds check: header plus declared match data must fit in "left" */
370 if (left < sizeof(struct ebt_entry_match) ||
371 left - sizeof(struct ebt_entry_match) < m->match_size)
374 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
376 return PTR_ERR(match);
380 par->matchinfo = m->data;
381 ret = xt_check_match(par, m->match_size,
382 e->ethproto, e->invflags & EBT_IPROTO);
384 module_put(match->me);
/* Validate one watcher in an entry: size must fit before the target
 * section, the extension must be loadable, and xt_check_target must
 * approve the watcher data. On failure the module ref is dropped.
 */
393 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
396 const struct ebt_entry *e = par->entryinfo;
397 struct xt_target *watcher;
398 size_t left = ((char *)e + e->target_offset) - (char *)w;
/* bounds check: header plus declared watcher data must fit */
401 if (left < sizeof(struct ebt_entry_watcher) ||
402 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
405 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
407 return PTR_ERR(watcher);
408 w->u.watcher = watcher;
410 par->target = watcher;
411 par->targinfo = w->data;
412 ret = xt_check_target(par, w->watcher_size,
413 e->ethproto, e->invflags & EBT_IPROTO);
415 module_put(watcher->me);
/* First structural pass over the userspace blob: walk all entries,
 * record the kernel-side hook_entry pointers that correspond to the
 * user-supplied ones, and verify sizes/offsets so later passes can
 * trust the layout. Error returns are on lines not visible here.
 */
423 static int ebt_verify_pointers(const struct ebt_replace *repl,
424 struct ebt_table_info *newinfo)
426 unsigned int limit = repl->entries_size;
427 unsigned int valid_hooks = repl->valid_hooks;
428 unsigned int offset = 0;
431 for (i = 0; i < NF_BR_NUMHOOKS; i++)
432 newinfo->hook_entry[i] = NULL;
434 newinfo->entries_size = repl->entries_size;
435 newinfo->nentries = repl->nentries;
437 while (offset < limit) {
438 size_t left = limit - offset;
439 struct ebt_entry *e = (void *)newinfo->entries + offset;
441 if (left < sizeof(unsigned int))
/* does this offset start a chain for one of the valid hooks? */
444 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
445 if ((valid_hooks & (1 << i)) == 0)
447 if ((char __user *)repl->hook_entry[i] ==
448 repl->entries + offset)
/* chain header (struct ebt_entries) vs. plain rule entry */
452 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
453 if (e->bitmask != 0) {
454 /* we make userspace set this right,
455 so there is no misunderstanding */
456 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
457 "in distinguisher\n");
460 if (i != NF_BR_NUMHOOKS)
461 newinfo->hook_entry[i] = (struct ebt_entries *)e;
462 if (left < sizeof(struct ebt_entries))
464 offset += sizeof(struct ebt_entries);
466 if (left < sizeof(struct ebt_entry))
468 if (left < e->next_offset)
470 if (e->next_offset < sizeof(struct ebt_entry))
472 offset += e->next_offset;
475 if (offset != limit) {
476 BUGPRINT("entries_size too small\n");
480 /* check if all valid hooks have a chain */
481 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
482 if (!newinfo->hook_entry[i] &&
483 (valid_hooks & (1 << i))) {
484 BUGPRINT("Valid hook without chain\n");
/* Per-entry size/offset validation pass (counts chains via *n, rules
 * via *cnt/*totalcnt, user-defined chains via *udc_cnt). Error
 * returns are on lines not visible in this chunk.
 */
492 * this one is very careful, as it is the first function
493 * to parse the userspace data
496 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
497 const struct ebt_table_info *newinfo,
498 unsigned int *n, unsigned int *cnt,
499 unsigned int *totalcnt, unsigned int *udc_cnt)
503 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
504 if ((void *)e == (void *)newinfo->hook_entry[i])
507 /* beginning of a new chain
508 if i == NF_BR_NUMHOOKS it must be a user defined chain */
509 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
510 /* this checks if the previous chain has as many entries
513 BUGPRINT("nentries does not equal the nr of entries "
/* base chains must end in DROP or ACCEPT; udc may use RETURN */
517 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
518 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
519 /* only RETURN from udc */
520 if (i != NF_BR_NUMHOOKS ||
521 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
522 BUGPRINT("bad policy\n");
526 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
528 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
529 BUGPRINT("counter_offset != totalcnt");
532 *n = ((struct ebt_entries *)e)->nentries;
536 /* a plain old entry, heh */
/* offsets must be ordered: entry < watchers <= target < next */
537 if (sizeof(struct ebt_entry) > e->watchers_offset ||
538 e->watchers_offset > e->target_offset ||
539 e->target_offset >= e->next_offset) {
540 BUGPRINT("entry offsets not in right order\n");
543 /* this is not checked anywhere else */
544 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
545 BUGPRINT("target size too small\n");
/* per-udc bookkeeping used by the chain-loop checker:
 * cs   - saved traversal position (chaininfo/entry/counter)
 * hookmask - set of base hooks from which this udc is reachable
 */
555 struct ebt_chainstack cs;
557 unsigned int hookmask;
/* Record the position of every user-defined chain (udc) in the udc
 * array; base-chain headers are skipped. *n indexes the next free
 * udc slot.
 */
561 * we need these positions to check that the jumps to a different part of the
562 * entries is a jump to the beginning of a new chain.
565 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
566 unsigned int *n, struct ebt_cl_stack *udc)
570 /* we're only interested in chain starts */
573 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
574 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
577 /* only care about udc */
578 if (i != NF_BR_NUMHOOKS)
581 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
582 /* these initialisations are depended on later in check_chainloops() */
584 udc[*n].hookmask = 0;
/* Destroy one match and drop its module reference. When i is
 * non-NULL only the first *i matches are cleaned (partial-init
 * unwind); i == NULL cleans everything.
 */
591 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
593 struct xt_mtdtor_param par;
595 if (i && (*i)-- == 0)
599 par.match = m->u.match;
600 par.matchinfo = m->data;
601 par.family = NFPROTO_BRIDGE;
602 if (par.match->destroy != NULL)
603 par.match->destroy(&par);
604 module_put(par.match->me);
/* Destroy one watcher and drop its module reference; the counter i
 * limits cleanup to the first *i watchers (partial-init unwind).
 */
609 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
611 struct xt_tgdtor_param par;
613 if (i && (*i)-- == 0)
617 par.target = w->u.watcher;
618 par.targinfo = w->data;
619 par.family = NFPROTO_BRIDGE;
620 if (par.target->destroy != NULL)
621 par.target->destroy(&par);
622 module_put(par.target->me);
/* Tear down one rule: all its watchers, matches and finally its
 * target, dropping every module reference taken at check time.
 * cnt (when non-NULL) limits cleanup to the first *cnt entries.
 */
627 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
629 struct xt_tgdtor_param par;
630 struct ebt_entry_target *t;
635 if (cnt && (*cnt)-- == 0)
637 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
638 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
639 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
642 par.target = t->u.target;
643 par.targinfo = t->data;
644 par.family = NFPROTO_BRIDGE;
645 if (par.target->destroy != NULL)
646 par.target->destroy(&par);
647 module_put(par.target->me);
/* Full semantic validation of one rule: flag sanity, hook-mask
 * computation (which base chains can reach it, possibly through
 * udcs), match/watcher/target checking via the xt framework, and
 * standard-target verdict validation. On error, already-checked
 * matches/watchers are unwound via the cleanup labels.
 */
652 ebt_check_entry(struct ebt_entry *e, struct net *net,
653 const struct ebt_table_info *newinfo,
654 const char *name, unsigned int *cnt,
655 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
657 struct ebt_entry_target *t;
658 struct xt_target *target;
659 unsigned int i, j, hook = 0, hookmask = 0;
662 struct xt_mtchk_param mtpar;
663 struct xt_tgchk_param tgpar;
665 /* don't mess with the struct ebt_entries */
669 if (e->bitmask & ~EBT_F_MASK) {
670 BUGPRINT("Unknown flag for bitmask\n");
673 if (e->invflags & ~EBT_INV_MASK) {
674 BUGPRINT("Unknown flag for inv bitmask\n");
677 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
678 BUGPRINT("NOPROTO & 802_3 not allowed\n");
681 /* what hook do we belong to? */
682 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
683 if (!newinfo->hook_entry[i])
685 if ((char *)newinfo->hook_entry[i] < (char *)e)
690 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
692 if (i < NF_BR_NUMHOOKS)
693 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
/* rule lives in a udc: inherit that udc's reachability mask */
695 for (i = 0; i < udc_cnt; i++)
696 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
699 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
701 hookmask = cl_s[i - 1].hookmask;
705 mtpar.net = tgpar.net = net;
706 mtpar.table = tgpar.table = name;
707 mtpar.entryinfo = tgpar.entryinfo = e;
708 mtpar.hook_mask = tgpar.hook_mask = hookmask;
709 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
710 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
712 goto cleanup_matches;
714 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
716 goto cleanup_watchers;
717 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
718 gap = e->next_offset - e->target_offset;
720 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
721 if (IS_ERR(target)) {
722 ret = PTR_ERR(target);
723 goto cleanup_watchers;
726 t->u.target = target;
727 if (t->u.target == &ebt_standard_target) {
/* NOTE(review): message text is inverted -- the check rejects a gap
 * that is too SMALL for a standard target */
728 if (gap < sizeof(struct ebt_standard_target)) {
729 BUGPRINT("Standard target size too big\n");
731 goto cleanup_watchers;
733 if (((struct ebt_standard_target *)t)->verdict <
734 -NUM_STANDARD_TARGETS) {
735 BUGPRINT("Invalid standard target\n");
737 goto cleanup_watchers;
739 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
740 module_put(t->u.target->me);
742 goto cleanup_watchers;
745 tgpar.target = target;
746 tgpar.targinfo = t->data;
747 ret = xt_check_target(&tgpar, t->target_size,
748 e->ethproto, e->invflags & EBT_IPROTO);
750 module_put(target->me);
751 goto cleanup_watchers;
/* unwind partial initialisation on failure */
756 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
758 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
/* Iterative (stack-in-cl_s) depth-first walk of all jumps reachable
 * from one base chain: detects loops and bad jump destinations, and
 * accumulates hookmask bits on every udc it can reach.
 */
763 * checks for loops and sets the hook mask for udc
764 * the hook mask for udc tells us from which base chains the udc can be
765 * accessed. This mask is a parameter to the check() functions of the extensions
767 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
768 unsigned int udc_cnt, unsigned int hooknr, char *base)
770 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
771 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
772 const struct ebt_entry_target *t;
774 while (pos < nentries || chain_nr != -1) {
775 /* end of udc, go back one 'recursion' step */
776 if (pos == nentries) {
777 /* put back values of the time when this chain was called */
778 e = cl_s[chain_nr].cs.e;
779 if (cl_s[chain_nr].from != -1)
781 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
783 nentries = chain->nentries;
784 pos = cl_s[chain_nr].cs.n;
785 /* make sure we won't see a loop that isn't one */
786 cl_s[chain_nr].cs.n = 0;
787 chain_nr = cl_s[chain_nr].from;
/* only standard targets can jump; others are ignored here */
791 t = (struct ebt_entry_target *)
792 (((char *)e) + e->target_offset);
793 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
795 if (e->target_offset + sizeof(struct ebt_standard_target) >
797 BUGPRINT("Standard target size too big\n");
800 verdict = ((struct ebt_standard_target *)t)->verdict;
801 if (verdict >= 0) { /* jump to another chain */
802 struct ebt_entries *hlp2 =
803 (struct ebt_entries *)(base + verdict);
804 for (i = 0; i < udc_cnt; i++)
805 if (hlp2 == cl_s[i].cs.chaininfo)
807 /* bad destination or loop */
809 BUGPRINT("bad destination\n");
/* already visited from this hook: nothing new to learn */
816 if (cl_s[i].hookmask & (1 << hooknr))
818 /* this can't be 0, so the loop test is correct */
819 cl_s[i].cs.n = pos + 1;
821 cl_s[i].cs.e = ebt_next_entry(e);
822 e = (struct ebt_entry *)(hlp2->data);
823 nentries = hlp2->nentries;
824 cl_s[i].from = chain_nr;
826 /* this udc is accessible from the base chain for hooknr */
827 cl_s[i].hookmask |= (1 << hooknr);
831 e = ebt_next_entry(e);
837 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
/* Validate and finalize a parsed table: chain ordering, per-entry
 * size checks, per-cpu chainstack allocation, udc discovery,
 * loop checking, then full per-entry semantic checks with unwind on
 * failure. Error returns/gotos are on lines not visible here.
 */
838 static int translate_table(struct net *net, const char *name,
839 struct ebt_table_info *newinfo)
841 unsigned int i, j, k, udc_cnt;
843 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
846 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
848 if (i == NF_BR_NUMHOOKS) {
849 BUGPRINT("No valid hooks specified\n");
852 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
853 BUGPRINT("Chains don't start at beginning\n");
856 /* make sure chains are ordered after each other in same order
857 as their corresponding hooks */
858 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
859 if (!newinfo->hook_entry[j])
861 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
862 BUGPRINT("Hook order must be followed\n");
868 /* do some early checkings and initialize some things */
869 i = 0; /* holds the expected nr. of entries for the chain */
870 j = 0; /* holds the up to now counted entries for the chain */
871 k = 0; /* holds the total nr. of entries, should equal
872 newinfo->nentries afterwards */
873 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
874 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
875 ebt_check_entry_size_and_hooks, newinfo,
876 &i, &j, &k, &udc_cnt);
882 BUGPRINT("nentries does not equal the nr of entries in the "
886 if (k != newinfo->nentries) {
887 BUGPRINT("Total nentries is wrong\n");
891 /* get the location of the udc, put them in an array
892 while we're at it, allocate the chainstack */
894 /* this will get free'd in do_replace()/ebt_register_table()
895 if an error occurs */
896 newinfo->chainstack =
897 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)))*/;
898 if (!newinfo->chainstack)
900 for_each_possible_cpu(i) {
901 newinfo->chainstack[i] =
902 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
/* unwind already-allocated per-cpu stacks on failure */
903 if (!newinfo->chainstack[i]) {
905 vfree(newinfo->chainstack[--i]);
906 vfree(newinfo->chainstack);
907 newinfo->chainstack = NULL;
912 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
915 i = 0; /* the i'th udc */
916 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
917 ebt_get_udc_positions, newinfo, &i, cl_s);
920 BUGPRINT("i != udc_cnt\n");
926 /* Check for loops */
927 for (i = 0; i < NF_BR_NUMHOOKS; i++)
928 if (newinfo->hook_entry[i])
929 if (check_chainloops(newinfo->hook_entry[i],
930 cl_s, udc_cnt, i, newinfo->entries)) {
935 /* we now know the following (along with E=mc²):
936 - the nr of entries in each chain is right
937 - the size of the allocated space is right
938 - all valid hooks have a corresponding chain
940 - wrong data can still be on the level of a single entry
941 - could be there are jumps to places that are not the
942 beginning of a chain. This can only occur in chains that
943 are not accessible from any base chains, so we don't care. */
945 /* used to know what we need to clean up if something goes wrong */
947 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
948 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
950 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
951 ebt_cleanup_entry, net, &i);
957 /* called under write_lock */
/* Sum the per-cpu counter sets into a single flat array: cpu 0's set
 * is copied, every other cpu's set is added on top.
 */
958 static void get_counters(const struct ebt_counter *oldcounters,
959 struct ebt_counter *counters, unsigned int nentries)
962 struct ebt_counter *counter_base;
964 /* counters of cpu 0 */
965 memcpy(counters, oldcounters,
966 sizeof(struct ebt_counter) * nentries);
968 /* add other counters to those of cpu 0 */
969 for_each_possible_cpu(cpu) {
972 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
973 for (i = 0; i < nentries; i++) {
974 counters[i].pcnt += counter_base[i].pcnt;
975 counters[i].bcnt += counter_base[i].bcnt;
/* Second half of table replacement: translate the new table, swap it
 * in under the table's write lock (snapshotting old counters for the
 * user), then free the old table's rules/entries/chainstacks.
 * Module refcounting keeps a non-empty table's module pinned.
 * Error labels/returns are on lines not visible in this chunk.
 */
980 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
981 struct ebt_table_info *newinfo)
984 struct ebt_counter *counterstmp = NULL;
985 /* used to be able to unlock earlier */
986 struct ebt_table_info *table;
989 /* the user wants counters back
990 the check on the size is done later, when we have the lock */
991 if (repl->num_counters) {
992 unsigned long size = repl->num_counters * sizeof(*counterstmp);
993 counterstmp = vmalloc(size);
998 newinfo->chainstack = NULL;
999 ret = ebt_verify_pointers(repl, newinfo);
1001 goto free_counterstmp;
1003 ret = translate_table(net, repl->name, newinfo);
1006 goto free_counterstmp;
1008 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1014 /* the table doesn't like it */
1015 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1018 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1019 BUGPRINT("Wrong nr. of counters requested\n")*/;
1024 /* we have the mutex lock, so no danger in reading this pointer */
1026 /* make sure the table can only be rmmod'ed if it contains no rules */
1027 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1030 } else if (table->nentries && !newinfo->nentries)
1032 /* we need an atomic snapshot of the counters */
1033 write_lock_bh(&t->lock);
1034 if (repl->num_counters)
1035 get_counters(t->private->counters, counterstmp,
1036 t->private->nentries);
/* the actual swap: readers see either the old or the new table */
1038 t->private = newinfo;
1039 write_unlock_bh(&t->lock);
1040 mutex_unlock(&ebt_mutex);
1041 /* so, a user can change the chains while having messed up her counter
1042 allocation. Only reason why this is done is because this way the lock
1043 is held only once, while this doesn't bring the kernel into a
1045 if (repl->num_counters &&
1046 copy_to_user(repl->counters, counterstmp,
1047 repl->num_counters * sizeof(struct ebt_counter))) {
1053 /* decrease module count and free resources */
1054 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1055 ebt_cleanup_entry, net, NULL);
1057 vfree(table->entries);
1058 if (table->chainstack) {
1059 for_each_possible_cpu(i)
1060 vfree(table->chainstack[i]);
1061 vfree(table->chainstack);
/* error path: unlock and tear down the not-yet-installed newinfo */
1069 mutex_unlock(&ebt_mutex);
1071 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1072 ebt_cleanup_entry, net, NULL);
1075 /* can be initialized in translate_table() */
1076 if (newinfo->chainstack) {
1077 for_each_possible_cpu(i)
1078 vfree(newinfo->chainstack[i]);
1079 vfree(newinfo->chainstack);
1084 /* replace the table */
/* setsockopt(EBT_SO_SET_ENTRIES) entry point: copy the replace
 * header and entry blob from userspace (with size/overflow checks),
 * then hand off to do_replace_finish(). Error paths free newinfo;
 * the freeing labels are on lines not visible in this chunk.
 */
1085 static int do_replace(struct net *net, const void __user *user,
1088 int ret, countersize;
1089 struct ebt_table_info *newinfo;
1090 struct ebt_replace tmp;
1092 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1095 if (len != sizeof(tmp) + tmp.entries_size) {
1096 BUGPRINT("Wrong len argument\n");
1100 if (tmp.entries_size == 0) {
1101 BUGPRINT("Entries_size never zero\n");
1104 /* overflow check */
1105 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1106 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1108 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1111 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1112 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1117 memset(newinfo->counters, 0, countersize);
1119 newinfo->entries = vmalloc(tmp.entries_size);
1120 if (!newinfo->entries) {
1125 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1126 BUGPRINT("Couldn't copy entries from userspace\n");
1131 ret = do_replace_finish(net, &tmp, newinfo);
1135 vfree(newinfo->entries);
/* Register a kernel-provided table (e.g. ebtable_filter): duplicate
 * the template, copy and translate its built-in entries, and add it
 * to the netns table list under ebt_mutex.
 * NOTE(review): the visible error return at original line 1200
 * appears to leak newinfo/table -- the intervening cleanup lines are
 * not visible in this chunk; confirm against the full source.
 */
1142 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1144 struct ebt_table_info *newinfo;
1145 struct ebt_table *t, *table;
1146 struct ebt_replace_kernel *repl;
1147 int ret, i, countersize;
1150 if (input_table == NULL || (repl = input_table->table) == NULL ||
1151 repl->entries == 0 || repl->entries_size == 0 ||
1152 repl->counters != NULL || input_table->private != NULL) {
1153 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1154 return ERR_PTR(-EINVAL);
1157 /* Don't add one table to multiple lists. */
1158 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1164 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1165 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1170 p = vmalloc(repl->entries_size);
1174 memcpy(p, repl->entries, repl->entries_size);
1175 newinfo->entries = p;
1177 newinfo->entries_size = repl->entries_size;
1178 newinfo->nentries = repl->nentries;
1181 memset(newinfo->counters, 0, countersize);
1183 /* fill in newinfo and parse the entries */
1184 newinfo->chainstack = NULL;
/* rebase the template's hook pointers onto our private copy p */
1185 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1186 if ((repl->valid_hooks & (1 << i)) == 0)
1187 newinfo->hook_entry[i] = NULL;
1189 newinfo->hook_entry[i] = p +
1190 ((char *)repl->hook_entry[i] - repl->entries);
1192 ret = translate_table(net, repl->name, newinfo);
1194 BUGPRINT("Translate_table failed\n");
1195 goto free_chainstack;
1198 if (table->check && table->check(newinfo, table->valid_hooks)) {
1199 BUGPRINT("The table doesn't like its own initial data, lol\n");
1200 return ERR_PTR(-EINVAL);
1203 table->private = newinfo;
1204 rwlock_init(&table->lock);
1205 ret = mutex_lock_interruptible(&ebt_mutex);
1207 goto free_chainstack;
1209 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1210 if (strcmp(t->name, table->name) == 0) {
1212 BUGPRINT("Table name already exists\n");
1217 /* Hold a reference count if the chains aren't empty */
1218 if (newinfo->nentries && !try_module_get(table->me)) {
1222 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1223 mutex_unlock(&ebt_mutex);
/* error unwind: unlock, free chainstacks/entries/newinfo/table */
1226 mutex_unlock(&ebt_mutex);
1228 if (newinfo->chainstack) {
1229 for_each_possible_cpu(i)
1230 vfree(newinfo->chainstack[i]);
1231 vfree(newinfo->chainstack);
1233 vfree(newinfo->entries);
1239 return ERR_PTR(ret);
/* Remove a table from the netns list and free everything it owns:
 * rules (dropping extension module refs), entry blob, per-cpu
 * chainstacks and the private info struct.
 */
1242 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1247 BUGPRINT("Request to unregister NULL table!!!\n");
1250 mutex_lock(&ebt_mutex);
1251 list_del(&table->list);
1252 mutex_unlock(&ebt_mutex);
1253 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1254 ebt_cleanup_entry, net, NULL);
/* drop the ref taken when the non-empty table was registered */
1255 if (table->private->nentries)
1256 module_put(table->me);
1257 vfree(table->private->entries);
1258 if (table->private->chainstack) {
1259 for_each_possible_cpu(i)
1260 vfree(table->private->chainstack[i]);
1261 vfree(table->private->chainstack);
1263 vfree(table->private);
1267 /* userspace just supplied us with counters */
/* Add user-supplied counter deltas to the table's cpu-0 counter set
 * under the write lock; count must equal the table's nentries.
 */
1268 static int do_update_counters(struct net *net, const char *name,
1269 struct ebt_counter __user *counters,
1270 unsigned int num_counters,
1271 const void __user *user, unsigned int len)
1274 struct ebt_counter *tmp;
1275 struct ebt_table *t;
1277 if (num_counters == 0)
1280 tmp = vmalloc(num_counters * sizeof(*tmp));
1284 t = find_table_lock(net, name, &ret, &ebt_mutex);
1288 if (num_counters != t->private->nentries) {
1289 BUGPRINT("Wrong nr of counters\n");
1294 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1299 /* we want an atomic add of the counters */
1300 write_lock_bh(&t->lock);
1302 /* we add to the counters of the first cpu */
1303 for (i = 0; i < num_counters; i++) {
1304 t->private->counters[i].pcnt += tmp[i].pcnt;
1305 t->private->counters[i].bcnt += tmp[i].bcnt;
1308 write_unlock_bh(&t->lock);
1311 mutex_unlock(&ebt_mutex);
/* setsockopt(EBT_SO_SET_COUNTERS): validate the header/len pair and
 * delegate to do_update_counters().
 */
1317 static int update_counters(struct net *net, const void __user *user,
1320 struct ebt_replace hlp;
1322 if (copy_from_user(&hlp, user, sizeof(hlp)))
1325 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328 return do_update_counters(net, hlp.name, hlp.counters,
1329 hlp.num_counters, user, len);
/* Write the match's name back into the userspace copy of the table
 * at the same relative offset as the kernel entry.
 */
1332 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1333 const char *base, char __user *ubase)
1335 char __user *hlp = ubase + ((char *)m - base);
1336 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
/* Write the watcher's name back into the userspace copy of the
 * table at the same relative offset as the kernel entry.
 */
1341 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1342 const char *base, char __user *ubase)
1344 char __user *hlp = ubase + ((char *)w - base);
1345 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
/* For one rule, rewrite the match/watcher/target names in the
 * userspace copy (kernel pointers are meaningless to userspace).
 * Chain headers (bitmask == 0) are skipped.
 */
1351 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1355 const struct ebt_entry_target *t;
1357 if (e->bitmask == 0)
1360 hlp = ubase + (((char *)e + e->target_offset) - base);
1361 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1363 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1366 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1369 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
/* Snapshot the per-cpu counters (summed under the write lock) into a
 * temporary buffer and copy them to userspace; num_counters == 0
 * means the caller doesn't want counters at all.
 */
1374 static int copy_counters_to_user(struct ebt_table *t,
1375 const struct ebt_counter *oldcounters,
1376 void __user *user, unsigned int num_counters,
1377 unsigned int nentries)
1379 struct ebt_counter *counterstmp;
1382 /* userspace might not need the counters */
1383 if (num_counters == 0)
1386 if (num_counters != nentries) {
1387 BUGPRINT("Num_counters wrong\n");
1391 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1395 write_lock_bh(&t->lock);
1396 get_counters(oldcounters, counterstmp, nentries);
1397 write_unlock_bh(&t->lock);
1399 if (copy_to_user(user, counterstmp,
1400 nentries * sizeof(struct ebt_counter)))
1406 /* called with ebt_mutex locked */
/* Copy the full table (entries, counters, extension names) to userspace.
 * GET_ENTRIES reads the live table (t->private); GET_INIT_ENTRIES reads the
 * registration-time template (t->table). Sizes supplied by userspace are
 * cross-checked against the kernel's view before anything is copied out.
 * NOTE(review): some error-return lines are elided in this excerpt.
 */
1407 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1408     const int *len, int cmd)
1410 	struct ebt_replace tmp;
1411 	const struct ebt_counter *oldcounters;
1412 	unsigned int entries_size, nentries;
1416 	if (cmd == EBT_SO_GET_ENTRIES) {
1417 		entries_size = t->private->entries_size;
1418 		nentries = t->private->nentries;
1419 		entries = t->private->entries;
1420 		oldcounters = t->private->counters;
1422 		entries_size = t->table->entries_size;
1423 		nentries = t->table->nentries;
1424 		entries = t->table->entries;
1425 		oldcounters = t->table->counters;
1428 	if (copy_from_user(&tmp, user, sizeof(tmp)))
/* *len must cover header + entries (+ counters only if requested) */
1431 	if (*len != sizeof(struct ebt_replace) + entries_size +
1432 	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1435 	if (tmp.nentries != nentries) {
1436 		BUGPRINT("Nentries wrong\n");
1440 	if (tmp.entries_size != entries_size) {
1441 		BUGPRINT("Wrong size\n");
1445 	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1446 					tmp.num_counters, nentries);
1450 	if (copy_to_user(tmp.entries, entries, entries_size)) {
1451 		BUGPRINT("Couldn't copy entries to userspace\n");
1454 	/* set the match/watcher/target names right */
1455 	return EBT_ENTRY_ITERATE(entries, entries_size,
1456 	   ebt_make_names, entries, tmp.entries);
/* setsockopt entry point: dispatch SET_ENTRIES (table replace) and
 * SET_COUNTERS (counter update) after a CAP_NET_ADMIN check.
 * NOTE(review): switch braces and default case elided in this excerpt.
 */
1459 static int do_ebt_set_ctl(struct sock *sk,
1460 	int cmd, void __user *user, unsigned int len)
1464 	if (!capable(CAP_NET_ADMIN))
1468 	case EBT_SO_SET_ENTRIES:
1469 		ret = do_replace(sock_net(sk), user, len);
1471 	case EBT_SO_SET_COUNTERS:
1472 		ret = update_counters(sock_net(sk), user, len);
/* getsockopt entry point: INFO/INIT_INFO return table metadata,
 * ENTRIES/INIT_ENTRIES delegate to copy_everything_to_user(). The table is
 * looked up (and ebt_mutex taken) via find_table_lock() on tmp.name.
 * NOTE(review): tmp.name comes straight from copy_from_user and no visible
 * line NUL-terminates it before find_table_lock() — looks like the issue
 * addressed upstream by forcing tmp.name[sizeof(tmp.name)-1] = '\0'
 * (CVE-2011-1080); confirm against the full file.
 */
1480 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1483 	struct ebt_replace tmp;
1484 	struct ebt_table *t;
1486 	if (!capable(CAP_NET_ADMIN))
1489 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1492 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1497 	case EBT_SO_GET_INFO:
1498 	case EBT_SO_GET_INIT_INFO:
1499 		if (*len != sizeof(struct ebt_replace)){
1501 			mutex_unlock(&ebt_mutex);
/* INFO reads the live table; INIT_INFO reads the registration template */
1504 		if (cmd == EBT_SO_GET_INFO) {
1505 			tmp.nentries = t->private->nentries;
1506 			tmp.entries_size = t->private->entries_size;
1507 			tmp.valid_hooks = t->valid_hooks;
1509 			tmp.nentries = t->table->nentries;
1510 			tmp.entries_size = t->table->entries_size;
1511 			tmp.valid_hooks = t->table->valid_hooks;
1513 		mutex_unlock(&ebt_mutex);
1514 		if (copy_to_user(user, &tmp, *len) != 0){
1515 			BUGPRINT("c2u Didn't work\n");
1522 	case EBT_SO_GET_ENTRIES:
1523 	case EBT_SO_GET_INIT_ENTRIES:
1524 		ret = copy_everything_to_user(t, user, len, cmd);
1525 		mutex_unlock(&ebt_mutex);
1529 		mutex_unlock(&ebt_mutex);
1536 #ifdef CONFIG_COMPAT
1537 /* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: pointers are compat_uptr_t and
 * integers compat_uint_t, so offsets/sizes differ from the native struct
 * starting at hook_entry (see compat_copy_ebt_replace_from_user()). */
1538 struct compat_ebt_replace {
1539 	char name[EBT_TABLE_MAXNAMELEN];
1540 	compat_uint_t valid_hooks;
1541 	compat_uint_t nentries;
1542 	compat_uint_t entries_size;
1543 	/* start of the chains */
1544 	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1545 	/* nr of counters userspace expects back */
1546 	compat_uint_t num_counters;
1547 	/* where the kernel will put the old counters. */
1548 	compat_uptr_t counters;
1549 	compat_uptr_t entries;
1552 /* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit on-the-wire shape of a match/watcher/target blob: a name (or
 * pointer union), a size, then the extension's payload in data[]. */
1553 struct compat_ebt_entry_mwt {
1555 		char name[EBT_FUNCTION_MAXNAMELEN];
1558 	compat_uint_t match_size;
1559 	compat_uint_t data[0];
1562 /* account for possible padding between match_size and ->data */
/* Difference between the aligned native header and the aligned compat
 * header; BUILD_BUG_ON guarantees the result is never negative. */
1563 static int ebt_compat_entry_padsize(void)
1565 	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1566 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1567 	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1568 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/* Size delta between the native and compat representation of one match.
 * matchsize == -1 marks matches with runtime-determined size (ebt_among),
 * where the delta is computed from the user-supplied length instead. */
1571 static int ebt_compat_match_offset(const struct xt_match *match,
1572     unsigned int userlen)
1575 	 * ebt_among needs special handling. The kernel .matchsize is
1576 	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1577 	 * value is expected.
1578 	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1580 	if (unlikely(match->matchsize == -1))
1581 		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1582 	return xt_compat_match_offset(match);
/* Emit one match into the 32-bit userspace stream at *dstptr: name and
 * shrunken size first, then the payload via the extension's own
 * compat_to_user hook when it has one, else a raw copy.
 * *size is reduced by the padding+offset saved in the compat layout.
 * NOTE(review): *dstptr advancement lines appear elided in this excerpt.
 */
1585 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1588 	const struct xt_match *match = m->u.match;
1589 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1590 	int off = ebt_compat_match_offset(match, m->match_size);
1591 	compat_uint_t msize = m->match_size - off;
1593 	BUG_ON(off >= m->match_size);
1595 	if (copy_to_user(cm->u.name, match->name,
1596 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1599 	if (match->compat_to_user) {
1600 		if (match->compat_to_user(cm->data, m->data))
1602 	} else if (copy_to_user(cm->data, m->data, msize))
1605 	*size -= ebt_compat_entry_padsize() + off;
/* Emit one target (or watcher, which shares the layout) into the 32-bit
 * userspace stream; mirrors compat_match_to_user() but uses the fixed
 * xt_compat_target_offset() since targets have no -1 size special case. */
1611 static int compat_target_to_user(struct ebt_entry_target *t,
1612      void __user **dstptr,
1615 	const struct xt_target *target = t->u.target;
1616 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1617 	int off = xt_compat_target_offset(target);
1618 	compat_uint_t tsize = t->target_size - off;
1620 	BUG_ON(off >= t->target_size);
1622 	if (copy_to_user(cm->u.name, target->name,
1623 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1626 	if (target->compat_to_user) {
1627 		if (target->compat_to_user(cm->data, t->data))
1629 	} else if (copy_to_user(cm->data, t->data, tsize))
1632 	*size -= ebt_compat_entry_padsize() + off;
/* Watchers share struct layout with targets, so reuse the target path. */
1638 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1639      void __user **dstptr,
1642 	return compat_target_to_user((struct ebt_entry_target *)w,
/* Translate one ebt_entry (plus its matches/watchers/target) into the
 * compact 32-bit stream. Chain headers (bitmask == 0) are copied verbatim
 * as struct ebt_entries. The watchers/target/next offsets stored in the
 * userspace entry are rewritten to account for the bytes saved so far
 * (origsize - *size).
 * NOTE(review): some error-return lines are elided in this excerpt.
 */
1646 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1649 	struct ebt_entry_target *t;
1650 	struct ebt_entry __user *ce;
1651 	u32 watchers_offset, target_offset, next_offset;
1652 	compat_uint_t origsize;
1655 	if (e->bitmask == 0) {
1656 		if (*size < sizeof(struct ebt_entries))
1658 		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1661 		*dstptr += sizeof(struct ebt_entries);
1662 		*size -= sizeof(struct ebt_entries);
1666 	if (*size < sizeof(*ce))
1669 	ce = (struct ebt_entry __user *)*dstptr;
1670 	if (copy_to_user(ce, e, sizeof(*ce)))
1674 	*dstptr += sizeof(*ce);
1676 	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1679 	watchers_offset = e->watchers_offset - (origsize - *size);
1681 	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1684 	target_offset = e->target_offset - (origsize - *size);
1686 	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1688 	ret = compat_target_to_user(t, dstptr, size);
1691 	next_offset = e->next_offset - (origsize - *size);
/* patch the already-copied entry with the translated offsets */
1693 	if (put_user(watchers_offset, &ce->watchers_offset) ||
1694 	    put_user(target_offset, &ce->target_offset) ||
1695 	    put_user(next_offset, &ce->next_offset))
1698 	*size -= sizeof(*ce);
/* Accumulate into *off the native-vs-compat size delta for one match. */
1702 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1704 	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1705 	*off += ebt_compat_entry_padsize();
/* Accumulate into *off the native-vs-compat size delta for one watcher. */
1709 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1711 	*off += xt_compat_target_offset(w->u.watcher);
1712 	*off += ebt_compat_entry_padsize();
/* For one entry: compute how much smaller its compat image is (off),
 * shrink newinfo->entries_size accordingly, record the per-entry delta via
 * xt_compat_add_offset(), and pull back any hook_entry offsets that point
 * past this entry.
 * NOTE(review): the hook comparison uses (base - hookptr) where the intent
 * reads as an offset computation — elided lines make the exact pointer
 * arithmetic here impossible to confirm from this excerpt; verify against
 * the full file.
 */
1716 static int compat_calc_entry(const struct ebt_entry *e,
1717      const struct ebt_table_info *info,
1719      struct compat_ebt_replace *newinfo)
1721 	const struct ebt_entry_target *t;
1722 	unsigned int entry_offset;
1725 	if (e->bitmask == 0)
1729 	entry_offset = (void *)e - base;
1731 	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1732 	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1734 	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1736 	off += xt_compat_target_offset(t->u.target);
1737 	off += ebt_compat_entry_padsize();
1739 	newinfo->entries_size -= off;
1741 	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1745 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1746 		const void *hookptr = info->hook_entry[i];
1747 		if (info->hook_entry[i] &&
1748 		    (e < (struct ebt_entry *)(base - hookptr))) {
1749 			newinfo->hook_entry[i] -= off;
1750 			pr_debug("0x%08X -> 0x%08X\n",
1751 					newinfo->hook_entry[i] + off,
1752 					newinfo->hook_entry[i]);
/* Fill newinfo with the table's compat-adjusted sizes by walking every
 * entry through compat_calc_entry(). */
1760 static int compat_table_info(const struct ebt_table_info *info,
1761     struct compat_ebt_replace *newinfo)
1763 	unsigned int size = info->entries_size;
1764 	const void *entries = info->entries;
1766 	newinfo->entries_size = size;
1768 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/* Compat counterpart of copy_everything_to_user(): validate the 32-bit
 * header against the (compat-shrunk) kernel sizes, copy counters, then
 * stream each entry out through compat_copy_entry_to_user().
 * NOTE(review): several error-return lines are elided in this excerpt.
 */
1772 static int compat_copy_everything_to_user(struct ebt_table *t,
1773      void __user *user, int *len, int cmd)
1775 	struct compat_ebt_replace repl, tmp;
1776 	struct ebt_counter *oldcounters;
1777 	struct ebt_table_info tinfo;
1781 	memset(&tinfo, 0, sizeof(tinfo));
1783 	if (cmd == EBT_SO_GET_ENTRIES) {
1784 		tinfo.entries_size = t->private->entries_size;
1785 		tinfo.nentries = t->private->nentries;
1786 		tinfo.entries = t->private->entries;
1787 		oldcounters = t->private->counters;
1789 		tinfo.entries_size = t->table->entries_size;
1790 		tinfo.nentries = t->table->nentries;
1791 		tinfo.entries = t->table->entries;
1792 		oldcounters = t->table->counters;
1795 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1798 	if (tmp.nentries != tinfo.nentries ||
1799 	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
/* repl gets the compat-adjusted entries_size via compat_table_info() */
1802 	memcpy(&repl, &tmp, sizeof(repl));
1803 	if (cmd == EBT_SO_GET_ENTRIES)
1804 		ret = compat_table_info(t->private, &repl);
1806 		ret = compat_table_info(&tinfo, &repl);
1810 	if (*len != sizeof(tmp) + repl.entries_size +
1811 	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1812 		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1813 				*len, tinfo.entries_size, repl.entries_size);
1817 	/* userspace might not need the counters */
1818 	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1819 					tmp.num_counters, tinfo.nentries);
1823 	pos = compat_ptr(tmp.entries);
1824 	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1825 			compat_copy_entry_to_user, &pos, &tmp.entries_size);
/* State for the two-pass 32->64 bit entry translation: first pass counts
 * (buf_kern_start == NULL), second pass writes into the kernel buffer. */
1828 struct ebt_entries_buf_state {
1829 	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1830 	u32 buf_kern_len;	/* total size of kernel buffer */
1831 	u32 buf_kern_offset;	/* amount of data copied so far */
1832 	u32 buf_user_offset;	/* read position in userspace buffer */
/* Advance the kernel write offset; the >= sz test rejects u32 overflow. */
1835 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1837 	state->buf_kern_offset += sz;
1838 	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/* Append sz bytes of data to the translation buffer. During the sizing
 * pass (buf_kern_start == NULL) nothing is written, only counted; the user
 * read offset advances in both passes. */
1841 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1842    void *data, unsigned int sz)
1844 	if (state->buf_kern_start == NULL)
1847 	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1849 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1852 	state->buf_user_offset += sz;
1853 	return ebt_buf_count(state, sz);
/* Append sz zero bytes of kernel-side padding; the user offset is left
 * untouched because this space has no counterpart in the 32-bit stream. */
1856 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1858 	char *b = state->buf_kern_start;
1860 	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1862 	if (b != NULL && sz > 0)
1863 		memset(b + state->buf_kern_offset, 0, sz);
1864 	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1865 	return ebt_buf_count(state, sz);
/* Translate one compat match/watcher/target blob into native form:
 * resolve the extension by name (request_module on miss), convert the
 * payload via the extension's compat_from_user hook or a raw copy, record
 * the per-entry size delta with xt_compat_add_offset(), and zero-pad up to
 * XT_ALIGN(size_kern). Returns the native payload size consumed
 * (off + match_size).
 * NOTE(review): IS_ERR checks, switch braces and some module_put paths are
 * elided in this excerpt.
 */
1874 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1875      enum compat_mwt compat_mwt,
1876      struct ebt_entries_buf_state *state,
1877      const unsigned char *base)
1879 	char name[EBT_FUNCTION_MAXNAMELEN];
1880 	struct xt_match *match;
1881 	struct xt_target *wt;
1883 	int off, pad = 0, ret = 0;
1884 	unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1886 	strlcpy(name, mwt->u.name, sizeof(name));
1888 	if (state->buf_kern_start)
1889 		dst = state->buf_kern_start + state->buf_kern_offset;
1891 	entry_offset = (unsigned char *) mwt - base;
1892 	switch (compat_mwt) {
1893 	case EBT_COMPAT_MATCH:
1894 		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1895 						name, 0), "ebt_%s", name);
1899 			return PTR_ERR(match);
1901 		off = ebt_compat_match_offset(match, match_size);
1903 			if (match->compat_from_user)
1904 				match->compat_from_user(dst, mwt->data);
1906 				memcpy(dst, mwt->data, match_size);
/* -1 means runtime-sized match (ebt_among); keep the user-supplied size */
1909 		size_kern = match->matchsize;
1910 		if (unlikely(size_kern == -1))
1911 			size_kern = match_size;
1912 		module_put(match->me);
1914 	case EBT_COMPAT_WATCHER: /* fallthrough */
1915 	case EBT_COMPAT_TARGET:
1916 		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1917 						name, 0), "ebt_%s", name);
1922 		off = xt_compat_target_offset(wt);
1925 			if (wt->compat_from_user)
1926 				wt->compat_from_user(dst, mwt->data);
1928 				memcpy(dst, mwt->data, match_size);
1931 		size_kern = wt->targetsize;
1937 	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1938 				off + ebt_compat_entry_padsize());
1943 	state->buf_kern_offset += match_size + off;
1944 	state->buf_user_offset += match_size;
1945 	pad = XT_ALIGN(size_kern) - size_kern;
1947 	if (pad > 0 && dst) {
1948 		BUG_ON(state->buf_kern_len <= pad);
1949 		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1950 		memset(dst + size_kern, 0, pad);
1952 	return off + match_size;
1956  * return size of all matches, watchers or target, including necessary
1957  * alignment and padding.
/* Walk a run of compat mwt blobs: copy each header, insert header padding,
 * translate the payload with compat_mtw_from_user(), and track 'growth'
 * (extra bytes the native layout needs). The kernel-side copy of
 * match_size is patched to the translated size.
 * NOTE(review): loop braces, size_left validation errors and the return
 * value are partially elided in this excerpt.
 */
1959 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1960      unsigned int size_left, enum compat_mwt type,
1961      struct ebt_entries_buf_state *state, const void *base)
1969 	buf = (char *) match32;
1971 	while (size_left >= sizeof(*match32)) {
1972 		struct ebt_entry_match *match_kern;
1975 		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1978 			tmp = state->buf_kern_start + state->buf_kern_offset;
1979 			match_kern = (struct ebt_entry_match *) tmp;
1981 		ret = ebt_buf_add(state, buf, sizeof(*match32));
1984 		size_left -= sizeof(*match32);
1986 		/* add padding before match->data (if any) */
1987 		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1991 		if (match32->match_size > size_left)
1994 		size_left -= match32->match_size;
1996 		ret = compat_mtw_from_user(match32, type, state, base);
2000 		BUG_ON(ret < match32->match_size);
2001 		growth += ret - match32->match_size;
2002 		growth += ebt_compat_entry_padsize();
2004 		buf += sizeof(*match32);
2005 		buf += match32->match_size;
2008 			match_kern->match_size = ret;
2010 	WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2011 	match32 = (struct compat_ebt_entry_mwt *) buf;
/* Iterate the compat watcher blobs between watchers_offset and
 * target_offset, calling fn on each; the final check rejects blobs whose
 * sizes do not land exactly on target_offset. (Comments kept outside the
 * macro body to preserve the backslash continuations.) */
2017 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
2021 	struct compat_ebt_entry_mwt *__watcher;             \
2023 	for (__i = e->watchers_offset;                      \
2024 	     __i < (e)->target_offset;                      \
2025 	     __i += __watcher->watcher_size +               \
2026 	     sizeof(struct compat_ebt_entry_mwt)) {         \
2027 		__watcher = (void *)(e) + __i;              \
2028 		__ret = fn(__watcher , ## args);            \
2033 	if (__i != (e)->target_offset)                      \
/* Iterate the compat match blobs between the end of struct ebt_entry and
 * watchers_offset, calling fn on each; the final check rejects blobs whose
 * sizes do not land exactly on watchers_offset. (Comments kept outside the
 * macro body to preserve the backslash continuations.) */
2039 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
2043 	struct compat_ebt_entry_mwt *__match;               \
2045 	for (__i = sizeof(struct ebt_entry);                \
2046 	     __i < (e)->watchers_offset;                    \
2047 	     __i += __match->match_size +                   \
2048 	     sizeof(struct compat_ebt_entry_mwt)) {         \
2049 		__match = (void *)(e) + __i;                \
2050 		__ret = fn(__match , ## args);              \
2055 	if (__i != (e)->watchers_offset)                    \
2061 /* called for all ebt_entry structures. */
/* Translate one ebt_entry: chain headers (bitmask == 0) pass through
 * verbatim; real entries have the fixed part copied, then the three
 * match/watcher/target sections sized via ebt_size_mwt(), with the stored
 * offsets (offsets[1..3]) rewritten in the kernel copy to account for
 * growth. *total is reduced by the user-side bytes consumed.
 * NOTE(review): declarations of 'size'/'buf_start', some error checks and
 * the trailing *total adjustment are elided in this excerpt.
 */
2062 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2063      unsigned int *total,
2064      struct ebt_entries_buf_state *state)
2066 	unsigned int i, j, startoff, new_offset = 0;
2067 	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2068 	unsigned int offsets[4];
2069 	unsigned int *offsets_update = NULL;
2073 	if (*total < sizeof(struct ebt_entries))
2076 	if (!entry->bitmask) {
2077 		*total -= sizeof(struct ebt_entries);
2078 		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2080 	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2083 	startoff = state->buf_user_offset;
2084 	/* pull in most part of ebt_entry, it does not need to be changed. */
2085 	ret = ebt_buf_add(state, entry,
2086 			offsetof(struct ebt_entry, watchers_offset));
2090 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2091 	memcpy(&offsets[1], &entry->watchers_offset,
2092 			sizeof(offsets) - sizeof(offsets[0]));
2094 	if (state->buf_kern_start) {
2095 		buf_start = state->buf_kern_start + state->buf_kern_offset;
2096 		offsets_update = (unsigned int *) buf_start;
2098 	ret = ebt_buf_add(state, &offsets[1],
2099 			sizeof(offsets) - sizeof(offsets[0]));
2102 	buf_start = (char *) entry;
2104 	 * 0: matches offset, always follows ebt_entry.
2105 	 * 1: watchers offset, from ebt_entry structure
2106 	 * 2: target offset, from ebt_entry structure
2107 	 * 3: next ebt_entry offset, from ebt_entry structure
2109 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2111 	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2112 		struct compat_ebt_entry_mwt *match32;
2114 		char *buf = buf_start;
2116 		buf = buf_start + offsets[i];
2117 		if (offsets[i] > offsets[j])
2120 		match32 = (struct compat_ebt_entry_mwt *) buf;
2121 		size = offsets[j] - offsets[i];
2122 		ret = ebt_size_mwt(match32, size, i, state, base);
2126 		if (offsets_update && new_offset) {
2127 			pr_debug("change offset %d to %d\n",
2128 				offsets_update[i], offsets[j] + new_offset);
2129 			offsets_update[i] = offsets[j] + new_offset;
2133 	startoff = state->buf_user_offset - startoff;
2135 	BUG_ON(*total < startoff);
2141  * repl->entries_size is the size of the ebt_entry blob in userspace.
2142  * It might need more memory when copied to a 64 bit kernel in case
2143  * userspace is 32-bit. So, first task: find out how much memory is needed.
2145  * Called before validation is performed.
/* Run every entry through size_entry_mwt(); returns the kernel-side bytes
 * needed (state->buf_kern_offset). Called twice: once to size, once to
 * actually translate into the allocated buffer. */
2147 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2148      struct ebt_entries_buf_state *state)
2150 	unsigned int size_remaining = size_user;
2153 	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2154 					&size_remaining, state);
/* a well-formed blob consumes exactly size_user bytes */
2158 	WARN_ON(size_remaining);
2159 	return state->buf_kern_offset;
/* Convert a 32-bit ebt_replace header into the native struct: validate
 * lengths and overflow-prone counts, copy the layout-identical prefix,
 * then widen the hook/counters/entries pointers via compat_ptr().
 * NOTE(review): error-return lines are elided in this excerpt.
 */
2163 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2164     void __user *user, unsigned int len)
2166 	struct compat_ebt_replace tmp;
2169 	if (len < sizeof(tmp))
2172 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2175 	if (len != sizeof(tmp) + tmp.entries_size)
2178 	if (tmp.entries_size == 0)
/* cap nentries/num_counters so later size arithmetic cannot overflow */
2181 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2182 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2184 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2187 	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2189 	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2190 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2191 		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2193 	repl->num_counters = tmp.num_counters;
2194 	repl->counters = compat_ptr(tmp.counters);
2195 	repl->entries = compat_ptr(tmp.entries);
/* Compat table replace: convert the header, copy the 32-bit entry blob,
 * size it via compat_copy_entries(), allocate the native-sized buffer,
 * translate in a second pass, fix up hook_entry pointers with the per-entry
 * jump deltas, then hand off to do_replace_finish(). All offset bookkeeping
 * happens under xt_compat_lock(NFPROTO_BRIDGE).
 * NOTE(review): several error labels, frees and returns are elided in this
 * excerpt; only the visible flow is documented.
 */
2199 static int compat_do_replace(struct net *net, void __user *user,
2202 	int ret, i, countersize, size64;
2203 	struct ebt_table_info *newinfo;
2204 	struct ebt_replace tmp;
2205 	struct ebt_entries_buf_state state;
2208 	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2210 	/* try real handler in case userland supplied needed padding */
2211 	if (ret == -EINVAL && do_replace(net, user, len) == 0)
2216 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2217 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2222 	memset(newinfo->counters, 0, countersize);
2224 	memset(&state, 0, sizeof(state));
2226 	newinfo->entries = vmalloc(tmp.entries_size);
2227 	if (!newinfo->entries) {
2232 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2237 	entries_tmp = newinfo->entries;
2239 	xt_compat_lock(NFPROTO_BRIDGE);
/* pass 1: count the kernel-side size (state.buf_kern_start == NULL) */
2241 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2245 	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2246 		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2247 		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2250 	newinfo->entries = vmalloc(size64);
2251 	if (!newinfo->entries) {
2257 	memset(&state, 0, sizeof(state));
2258 	state.buf_kern_start = newinfo->entries;
2259 	state.buf_kern_len = size64;
/* pass 2: translate into the freshly sized native buffer */
2261 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2262 	BUG_ON(ret < 0);	/* parses same data again */
2265 	tmp.entries_size = size64;
2267 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2268 		char __user *usrptr;
2269 		if (tmp.hook_entry[i]) {
2271 			usrptr = (char __user *) tmp.hook_entry[i];
2272 			delta = usrptr - tmp.entries;
2273 			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2274 			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2278 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2279 	xt_compat_unlock(NFPROTO_BRIDGE);
2281 	ret = do_replace_finish(net, &tmp, newinfo);
2285 	vfree(newinfo->entries);
2290 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2291 	xt_compat_unlock(NFPROTO_BRIDGE);
/* Compat SET_COUNTERS: read the 32-bit header; if the length matches the
 * native layout instead, fall through to the real update_counters(). */
2295 static int compat_update_counters(struct net *net, void __user *user,
2298 	struct compat_ebt_replace hlp;
2300 	if (copy_from_user(&hlp, user, sizeof(hlp)))
2303 	/* try real handler in case userland supplied needed padding */
2304 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2305 		return update_counters(net, user, len);
2307 	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2308 					hlp.num_counters, user, len);
/* Compat setsockopt dispatcher; mirrors do_ebt_set_ctl() with the compat
 * handlers. NOTE(review): switch braces/default elided in this excerpt. */
2311 static int compat_do_ebt_set_ctl(struct sock *sk,
2312 		int cmd, void __user *user, unsigned int len)
2316 	if (!capable(CAP_NET_ADMIN))
2320 	case EBT_SO_SET_ENTRIES:
2321 		ret = compat_do_replace(sock_net(sk), user, len);
2323 	case EBT_SO_SET_COUNTERS:
2324 		ret = compat_update_counters(sock_net(sk), user, len);
/* Compat getsockopt dispatcher. INFO variants fall back to the native
 * handler when *len already matches the native struct. Runs the compat
 * size adjustment under xt_compat_lock(); for ENTRIES it first tries the
 * native copy path (see the in-code comment), then the compat path.
 * NOTE(review): as in do_ebt_get_ctl(), no visible line NUL-terminates
 * tmp.name before find_table_lock() — verify against the full file.
 */
2332 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2333      void __user *user, int *len)
2336 	struct compat_ebt_replace tmp;
2337 	struct ebt_table *t;
2339 	if (!capable(CAP_NET_ADMIN))
2342 	/* try real handler in case userland supplied needed padding */
2343 	if ((cmd == EBT_SO_GET_INFO ||
2344 	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2345 			return do_ebt_get_ctl(sk, cmd, user, len);
2347 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2350 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2354 	xt_compat_lock(NFPROTO_BRIDGE);
2356 	case EBT_SO_GET_INFO:
2357 		tmp.nentries = t->private->nentries;
2358 		ret = compat_table_info(t->private, &tmp);
2361 		tmp.valid_hooks = t->valid_hooks;
2363 		if (copy_to_user(user, &tmp, *len) != 0) {
2369 	case EBT_SO_GET_INIT_INFO:
2370 		tmp.nentries = t->table->nentries;
2371 		tmp.entries_size = t->table->entries_size;
2372 		tmp.valid_hooks = t->table->valid_hooks;
2374 		if (copy_to_user(user, &tmp, *len) != 0) {
2380 	case EBT_SO_GET_ENTRIES:
2381 	case EBT_SO_GET_INIT_ENTRIES:
2383 		 * try real handler first in case of userland-side padding.
2384 		 * in case we are dealing with an 'ordinary' 32 bit binary
2385 		 * without 64bit compatibility padding, this will fail right
2386 		 * after copy_from_user when the *len argument is validated.
2388 		 * the compat_ variant needs to do one pass over the kernel
2389 		 * data set to adjust for size differences before it the check.
2391 		if (copy_everything_to_user(t, user, len, cmd) == 0)
2394 		ret = compat_copy_everything_to_user(t, user, len, cmd);
2400 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2401 	xt_compat_unlock(NFPROTO_BRIDGE);
2402 	mutex_unlock(&ebt_mutex);
/* Sockopt registration: native set/get handlers plus, under CONFIG_COMPAT,
 * the 32-bit variants; covers the EBT_BASE_CTL..EBT_SO_{SET,GET}_MAX range. */
2407 static struct nf_sockopt_ops ebt_sockopts =
2410 	.set_optmin	= EBT_BASE_CTL,
2411 	.set_optmax	= EBT_SO_SET_MAX + 1,
2412 	.set		= do_ebt_set_ctl,
2413 #ifdef CONFIG_COMPAT
2414 	.compat_set	= compat_do_ebt_set_ctl,
2416 	.get_optmin	= EBT_BASE_CTL,
2417 	.get_optmax	= EBT_SO_GET_MAX + 1,
2418 	.get		= do_ebt_get_ctl,
2419 #ifdef CONFIG_COMPAT
2420 	.compat_get	= compat_do_ebt_get_ctl,
2422 	.owner		= THIS_MODULE,
/* Module init: register the standard target, then the sockopt ops;
 * unregister the target again if sockopt registration fails. */
2425 static int __init ebtables_init(void)
2429 	ret = xt_register_target(&ebt_standard_target);
2432 	ret = nf_register_sockopt(&ebt_sockopts);
2434 		xt_unregister_target(&ebt_standard_target);
2438 	printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: tear down in reverse registration order. */
2442 static void __exit ebtables_fini(void)
2444 	nf_unregister_sockopt(&ebt_sockopts);
2445 	xt_unregister_target(&ebt_standard_target);
2446 	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Public API for bridge table modules (ebtable_filter, ebtable_nat, ...). */
2449 EXPORT_SYMBOL(ebt_register_table);
2450 EXPORT_SYMBOL(ebt_unregister_table);
2451 EXPORT_SYMBOL(ebt_do_table);
2452 module_init(ebtables_init);
2453 module_exit(ebtables_fini);
2454 MODULE_LICENSE("GPL");