5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <linux/audit.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
/* Debug helper: emits an "ebtables bug" message via printk; the
 * commented-out variant below is the switch for disabling it. */
34 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
35 "report to author: "format, ## args)
36 /* #define BUGPRINT(format, args...) */
39 * Each cpu has its own set of counters, so there is no need for write_lock in
41 * For reading or updating the counters, the user context needs to
45 /* The size of each set of counters is altered to get cache alignment */
46 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Byte size of one per-cpu set of n counters, padded to a cache-line
 * multiple so that different cpus never share a line. */
47 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* Start of cpu's counter set within the flat counter area c holding
 * n counters per cpu. */
48 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
49 COUNTER_OFFSET(n) * cpu))
/* Serialises table lookup/registration/replacement and counter updates. */
53 static DEFINE_MUTEX(ebt_mutex);
/* Translate a 32-bit (compat) standard-target verdict from userspace:
 * positive verdicts are jump offsets, so they are adjusted by the compat
 * delta before being stored into the kernel entry.
 * NOTE(review): this listing elides some lines of the body (braces etc.). */
56 static void ebt_standard_compat_from_user(void *dst, const void *src)
58 int v = *(compat_int_t *)src;
61 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 memcpy(dst, &v, sizeof(v));
/* Inverse of the _from_user conversion: subtract the compat jump delta
 * from the kernel verdict and copy the 32-bit value to userspace.
 * Returns 0 on success or -EFAULT if copy_to_user() fails. */
65 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
67 compat_int_t cv = *(int *)src;
70 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* The built-in "standard" target, holding a plain int verdict
 * (ACCEPT/DROP/CONTINUE/RETURN or a chain jump offset). The compat
 * hooks above translate the verdict for 32-bit userspace.
 * NOTE(review): the .name initialiser line is elided in this listing. */
76 static struct xt_target ebt_standard_target = {
79 .family = NFPROTO_BRIDGE,
80 .targetsize = sizeof(int),
82 .compatsize = sizeof(compat_int_t),
83 .compat_from_user = ebt_standard_compat_from_user,
84 .compat_to_user = ebt_standard_compat_to_user,
/* Invoke one watcher extension on the skb. Watchers only observe the
 * packet; as the comment below says, their return value is not a
 * verdict (the elided return presumably reflects that — confirm). */
89 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
90 struct xt_action_param *par)
92 par->target = w->u.watcher;
93 par->targinfo = w->data;
94 w->u.watcher->target(skb, par);
95 /* watchers don't give a verdict */
/* Run one match extension against the skb and map its boolean result
 * to EBT_MATCH / EBT_NOMATCH for the caller's iteration logic. */
100 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
101 struct xt_action_param *par)
103 par->match = m->u.match;
104 par->matchinfo = m->data;
105 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
/* Compare a rule's interface-name pattern against device->name.
 * A byte of value 1 in the pattern is the wildcard token terminating
 * the comparison. Returns 0 on match, non-zero on mismatch.
 * NOTE(review): the declarations of i/devname and the early-exit checks
 * are elided from this listing. */
109 ebt_dev_check(const char *entry, const struct net_device *device)
118 devname = device->name;
119 /* 1 is the wildcard token */
120 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
122 return devname[i] != entry[i] && entry[i] != 1;
/* FWINV2: apply the entry's inversion flag invflg to a boolean result. */
125 #define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
126 /* process standard matches */
/* Evaluate the built-in per-entry matches: protocol (including 802.3
 * and VLAN-tagged frames), in/out device, logical bridge in/out device,
 * and source/destination MAC with mask. Non-zero return means the
 * entry does NOT match (callers skip the rule).
 * NOTE(review): several declarations and the return statements are
 * elided from this listing. */
128 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
129 const struct net_device *in, const struct net_device *out)
131 const struct ethhdr *h = eth_hdr(skb);
132 const struct net_bridge_port *p;
/* VLAN-tagged skbs are matched as ETH_P_8021Q regardless of inner proto */
136 if (skb_vlan_tag_present(skb))
137 ethproto = htons(ETH_P_8021Q);
139 ethproto = h->h_proto;
141 if (e->bitmask & EBT_802_3) {
142 if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
144 } else if (!(e->bitmask & EBT_NOPROTO) &&
145 FWINV2(e->ethproto != ethproto, EBT_IPROTO))
148 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
150 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
152 /* rcu_read_lock()ed by nf_hook_slow */
153 if (in && (p = br_port_get_rcu(in)) != NULL &&
154 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
156 if (out && (p = br_port_get_rcu(out)) != NULL &&
157 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
/* MAC comparisons accumulate per-byte differences under the mask
 * (mask operand lines are elided in this listing) */
160 if (e->bitmask & EBT_SOURCEMAC) {
162 for (i = 0; i < 6; i++)
163 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
165 if (FWINV2(verdict != 0, EBT_ISOURCE) )
168 if (e->bitmask & EBT_DESTMAC) {
170 for (i = 0; i < 6; i++)
171 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
173 if (FWINV2(verdict != 0, EBT_IDEST) )
/* Advance to the next entry: entries are laid out back-to-back and
 * next_offset is the byte distance to the following one. */
180 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
182 return (void *)entry + entry->next_offset;
185 /* Do some firewalling */
/* Main packet-evaluation loop for one hook of one ebtables table.
 * Walks the chain for state->hook under table->lock (read side),
 * running matches, watchers and the target of each entry; follows
 * chain jumps via a per-cpu chainstack and honours the chain policy
 * when a chain is exhausted.
 * NOTE(review): many lines (locals, gotos, returns, loop increments)
 * are elided from this listing; the comments below describe only what
 * the visible lines establish. */
186 unsigned int ebt_do_table(struct sk_buff *skb,
187 const struct nf_hook_state *state,
188 struct ebt_table *table)
190 unsigned int hook = state->hook;
192 struct ebt_entry *point;
193 struct ebt_counter *counter_base, *cb_base;
194 const struct ebt_entry_target *t;
196 struct ebt_chainstack *cs;
197 struct ebt_entries *chaininfo;
199 const struct ebt_table_info *private;
200 struct xt_action_param acpar;
202 acpar.family = NFPROTO_BRIDGE;
203 acpar.net = state->net;
204 acpar.in = state->in;
205 acpar.out = state->out;
206 acpar.hotdrop = false;
207 acpar.hooknum = hook;
/* take the read lock; private must not change while we traverse */
209 read_lock_bh(&table->lock);
210 private = table->private;
211 cb_base = COUNTER_BASE(private->counters, private->nentries,
213 if (private->chainstack)
214 cs = private->chainstack[smp_processor_id()];
217 chaininfo = private->hook_entry[hook];
218 nentries = private->hook_entry[hook]->nentries;
219 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
220 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
221 /* base for chain jumps */
222 base = private->entries;
224 while (i < nentries) {
225 if (ebt_basic_match(point, skb, state->in, state->out))
228 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
231 read_unlock_bh(&table->lock);
235 /* increase counter */
236 (*(counter_base + i)).pcnt++;
237 (*(counter_base + i)).bcnt += skb->len;
239 /* these should only watch: not modify, nor tell us
240 what to do with the packet */
241 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
243 t = (struct ebt_entry_target *)
244 (((char *)point) + point->target_offset);
245 /* standard target */
/* a NULL target function marks the built-in standard target: its
 * verdict is stored inline rather than computed */
246 if (!t->u.target->target)
247 verdict = ((struct ebt_standard_target *)t)->verdict;
249 acpar.target = t->u.target;
250 acpar.targinfo = t->data;
251 verdict = t->u.target->target(skb, &acpar);
253 if (verdict == EBT_ACCEPT) {
254 read_unlock_bh(&table->lock);
257 if (verdict == EBT_DROP) {
258 read_unlock_bh(&table->lock);
261 if (verdict == EBT_RETURN) {
263 #ifdef CONFIG_NETFILTER_DEBUG
265 BUGPRINT("RETURN on base chain");
266 /* act like this is EBT_CONTINUE */
271 /* put all the local variables right */
/* pop one frame from the chainstack to resume the calling chain */
273 chaininfo = cs[sp].chaininfo;
274 nentries = chaininfo->nentries;
276 counter_base = cb_base +
277 chaininfo->counter_offset;
280 if (verdict == EBT_CONTINUE)
282 #ifdef CONFIG_NETFILTER_DEBUG
284 BUGPRINT("bogus standard verdict\n");
285 read_unlock_bh(&table->lock);
/* positive verdict == jump: push current position and descend */
291 cs[sp].chaininfo = chaininfo;
292 cs[sp].e = ebt_next_entry(point);
294 chaininfo = (struct ebt_entries *) (base + verdict);
295 #ifdef CONFIG_NETFILTER_DEBUG
296 if (chaininfo->distinguisher) {
297 BUGPRINT("jump to non-chain\n");
298 read_unlock_bh(&table->lock);
302 nentries = chaininfo->nentries;
303 point = (struct ebt_entry *)chaininfo->data;
304 counter_base = cb_base + chaininfo->counter_offset;
308 point = ebt_next_entry(point);
/* chain exhausted: apply the chain policy */
312 /* I actually like this :) */
313 if (chaininfo->policy == EBT_RETURN)
315 if (chaininfo->policy == EBT_ACCEPT) {
316 read_unlock_bh(&table->lock);
319 read_unlock_bh(&table->lock);
323 /* If it succeeds, returns element and locks mutex */
/* Search a name-keyed list (each element starts with a list_head
 * followed by a name buffer) for an exact strcmp() match; on success
 * the element is returned with the mutex held.
 * NOTE(review): the struct-cursor declaration, locking and the
 * not-found path are elided from this listing. */
325 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
329 struct list_head list;
330 char name[EBT_FUNCTION_MAXNAMELEN];
334 list_for_each_entry(e, head, list) {
335 if (strcmp(e->name, name) == 0)
/* As find_inlist_lock_noload(), but on a miss requests the module
 * "<prefix><name>" and retries (try_then_request_module). */
344 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
345 int *error, struct mutex *mutex)
347 return try_then_request_module(
348 find_inlist_lock_noload(head, name, error, mutex),
349 "%s%s", prefix, name);
/* Look up an ebtables table by name in this netns, auto-loading
 * "ebtable_<name>" if necessary; returns with ebt_mutex held. */
352 static inline struct ebt_table *
353 find_table_lock(struct net *net, const char *name, int *error,
356 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
357 "ebtable_", error, mutex);
/* Validate one match of an entry: bounds-check match_size against the
 * space left before watchers_offset, look up and pin the xt match
 * module, then run xt_check_match(). On check failure the module
 * reference is dropped.
 * NOTE(review): error-return lines and the success path are elided
 * from this listing. */
361 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
364 const struct ebt_entry *e = par->entryinfo;
365 struct xt_match *match;
366 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
/* both the header and the payload must fit in the remaining space */
369 if (left < sizeof(struct ebt_entry_match) ||
370 left - sizeof(struct ebt_entry_match) < m->match_size)
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
375 return PTR_ERR(match);
379 par->matchinfo = m->data;
380 ret = xt_check_match(par, m->match_size,
381 e->ethproto, e->invflags & EBT_IPROTO);
383 module_put(match->me);
/* Validate one watcher of an entry: bounds-check watcher_size against
 * the space left before target_offset, look up the xt target by name,
 * reject non-bridge-family targets, then run xt_check_target().
 * NOTE(review): error-return lines and the success path are elided
 * from this listing. */
392 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
395 const struct ebt_entry *e = par->entryinfo;
396 struct xt_target *watcher;
397 size_t left = ((char *)e + e->target_offset) - (char *)w;
400 if (left < sizeof(struct ebt_entry_watcher) ||
401 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
404 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
406 return PTR_ERR(watcher);
/* xtables verdict conventions differ; only bridge-family watchers work */
408 if (watcher->family != NFPROTO_BRIDGE) {
409 module_put(watcher->me);
413 w->u.watcher = watcher;
415 par->target = watcher;
416 par->targinfo = w->data;
417 ret = xt_check_target(par, w->watcher_size,
418 e->ethproto, e->invflags & EBT_IPROTO);
420 module_put(watcher->me);
/* First structural pass over the blob copied from userspace: walk it
 * from offset 0 to entries_size, record the kernel address of each
 * hook's chain head in newinfo->hook_entry[], and verify that every
 * object (chain header or entry) fits in the remaining space and that
 * every valid hook ends up with a chain. Offsets/pointers supplied by
 * userspace are treated as untrusted throughout.
 * NOTE(review): error-return lines are elided from this listing. */
428 static int ebt_verify_pointers(const struct ebt_replace *repl,
429 struct ebt_table_info *newinfo)
431 unsigned int limit = repl->entries_size;
432 unsigned int valid_hooks = repl->valid_hooks;
433 unsigned int offset = 0;
436 for (i = 0; i < NF_BR_NUMHOOKS; i++)
437 newinfo->hook_entry[i] = NULL;
439 newinfo->entries_size = repl->entries_size;
440 newinfo->nentries = repl->nentries;
442 while (offset < limit) {
443 size_t left = limit - offset;
444 struct ebt_entry *e = (void *)newinfo->entries + offset;
446 if (left < sizeof(unsigned int))
/* does this offset correspond to one of the user's hook pointers? */
449 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
450 if ((valid_hooks & (1 << i)) == 0)
452 if ((char __user *)repl->hook_entry[i] ==
453 repl->entries + offset)
/* chain header (hook start or bitmask==0) vs. regular entry */
457 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
458 if (e->bitmask != 0) {
459 /* we make userspace set this right,
460 so there is no misunderstanding */
461 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
462 "in distinguisher\n");
465 if (i != NF_BR_NUMHOOKS)
466 newinfo->hook_entry[i] = (struct ebt_entries *)e;
467 if (left < sizeof(struct ebt_entries))
469 offset += sizeof(struct ebt_entries);
471 if (left < sizeof(struct ebt_entry))
473 if (left < e->next_offset)
475 if (e->next_offset < sizeof(struct ebt_entry))
477 offset += e->next_offset;
480 if (offset != limit) {
481 BUGPRINT("entries_size too small\n");
485 /* check if all valid hooks have a chain */
486 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
487 if (!newinfo->hook_entry[i] &&
488 (valid_hooks & (1 << i))) {
489 BUGPRINT("Valid hook without chain\n");
497 * this one is very careful
498 * to parse the userspace data
/* Per-object validation pass (run via EBT_ENTRY_ITERATE): for chain
 * headers it checks the policy and counter_offset bookkeeping and
 * counts user-defined chains in *udc_cnt; for plain entries it checks
 * that the watcher/target/next offsets are ordered and leave room for
 * at least an ebt_entry_target.
 * NOTE(review): error-return lines and the counter-increment lines
 * are elided from this listing. */
501 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
502 const struct ebt_table_info *newinfo,
503 unsigned int *n, unsigned int *cnt,
504 unsigned int *totalcnt, unsigned int *udc_cnt)
508 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
509 if ((void *)e == (void *)newinfo->hook_entry[i])
512 /* beginning of a new chain
513 if i == NF_BR_NUMHOOKS it must be a user defined chain */
514 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
515 /* this checks if the previous chain has as many entries
518 BUGPRINT("nentries does not equal the nr of entries "
/* base chains may only have DROP/ACCEPT policy; udc may also RETURN */
522 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
523 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
524 /* only RETURN from udc */
525 if (i != NF_BR_NUMHOOKS ||
526 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
527 BUGPRINT("bad policy\n");
531 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
533 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
534 BUGPRINT("counter_offset != totalcnt");
537 *n = ((struct ebt_entries *)e)->nentries;
541 /* a plain old entry, heh */
542 if (sizeof(struct ebt_entry) > e->watchers_offset ||
543 e->watchers_offset > e->target_offset ||
544 e->target_offset >= e->next_offset) {
545 BUGPRINT("entry offsets not in right order\n");
548 /* this is not checked anywhere else */
549 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
550 BUGPRINT("target size too small\n");
560 struct ebt_chainstack cs;
562 unsigned int hookmask;
566 * we need these positions to check that the jumps to a different part of the
567 * entries is a jump to the beginning of a new chain.
/* Record the position of each user-defined chain (udc) into the udc[]
 * array (iterated over all entries); base-chain heads and plain
 * entries are skipped. The cs/hookmask fields seeded here are relied
 * upon by check_chainloops().
 * NOTE(review): early-return lines for non-chain entries are elided
 * from this listing. */
570 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
571 unsigned int *n, struct ebt_cl_stack *udc)
575 /* we're only interested in chain starts */
578 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
579 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
582 /* only care about udc */
583 if (i != NF_BR_NUMHOOKS)
586 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
587 /* these initialisations are depended on later in check_chainloops() */
589 udc[*n].hookmask = 0;
/* Destroy one match: call its destructor (if any) and drop the module
 * reference. When i is non-NULL, only the first *i matches are cleaned
 * (partial cleanup after a failed check pass). */
596 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
598 struct xt_mtdtor_param par;
600 if (i && (*i)-- == 0)
604 par.match = m->u.match;
605 par.matchinfo = m->data;
606 par.family = NFPROTO_BRIDGE;
607 if (par.match->destroy != NULL)
608 par.match->destroy(&par);
609 module_put(par.match->me);
/* Destroy one watcher: call its destructor (if any) and drop the
 * module reference. As with ebt_cleanup_match(), a non-NULL i limits
 * cleanup to the first *i watchers. */
614 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
616 struct xt_tgdtor_param par;
618 if (i && (*i)-- == 0)
622 par.target = w->u.watcher;
623 par.targinfo = w->data;
624 par.family = NFPROTO_BRIDGE;
625 if (par.target->destroy != NULL)
626 par.target->destroy(&par);
627 module_put(par.target->me);
/* Tear down one full entry: all its watchers and matches (NULL limit
 * means "all of them"), then the target's destructor and module ref.
 * A non-NULL cnt limits cleanup to the first *cnt entries.
 * NOTE(review): the chain-header skip lines are elided in this listing. */
632 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
634 struct xt_tgdtor_param par;
635 struct ebt_entry_target *t;
640 if (cnt && (*cnt)-- == 0)
642 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
643 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
644 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
647 par.target = t->u.target;
648 par.targinfo = t->data;
649 par.family = NFPROTO_BRIDGE;
650 if (par.target->destroy != NULL)
651 par.target->destroy(&par);
652 module_put(par.target->me);
/* Full semantic validation of one entry: flag sanity, hook-membership
 * computation (which base chains can reach this entry, via cl_s for
 * udc), then per-extension checks for matches, watchers and the
 * target. On any failure, already-checked matches/watchers are
 * cleaned up via the labels at the bottom.
 * NOTE(review): error-return and counter lines are elided from this
 * listing; comments below describe only visible logic. */
657 ebt_check_entry(struct ebt_entry *e, struct net *net,
658 const struct ebt_table_info *newinfo,
659 const char *name, unsigned int *cnt,
660 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
662 struct ebt_entry_target *t;
663 struct xt_target *target;
664 unsigned int i, j, hook = 0, hookmask = 0;
667 struct xt_mtchk_param mtpar;
668 struct xt_tgchk_param tgpar;
670 /* don't mess with the struct ebt_entries */
674 if (e->bitmask & ~EBT_F_MASK) {
675 BUGPRINT("Unknown flag for bitmask\n");
678 if (e->invflags & ~EBT_INV_MASK) {
679 BUGPRINT("Unknown flag for inv bitmask\n");
682 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
683 BUGPRINT("NOPROTO & 802_3 not allowed\n");
686 /* what hook do we belong to? */
687 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
688 if (!newinfo->hook_entry[i])
690 if ((char *)newinfo->hook_entry[i] < (char *)e)
695 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
697 if (i < NF_BR_NUMHOOKS)
698 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
/* entry is in a user-defined chain: inherit that chain's hookmask */
700 for (i = 0; i < udc_cnt; i++)
701 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
704 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
706 hookmask = cl_s[i - 1].hookmask;
710 memset(&mtpar, 0, sizeof(mtpar));
711 memset(&tgpar, 0, sizeof(tgpar));
712 mtpar.net = tgpar.net = net;
713 mtpar.table = tgpar.table = name;
714 mtpar.entryinfo = tgpar.entryinfo = e;
715 mtpar.hook_mask = tgpar.hook_mask = hookmask;
716 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
717 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
719 goto cleanup_matches;
721 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
723 goto cleanup_watchers;
724 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
/* gap = room available for the target header + its payload */
725 gap = e->next_offset - e->target_offset;
727 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
728 if (IS_ERR(target)) {
729 ret = PTR_ERR(target);
730 goto cleanup_watchers;
733 /* Reject UNSPEC, xtables verdicts/return values are incompatible */
734 if (target->family != NFPROTO_BRIDGE) {
735 module_put(target->me);
737 goto cleanup_watchers;
740 t->u.target = target;
741 if (t->u.target == &ebt_standard_target) {
742 if (gap < sizeof(struct ebt_standard_target)) {
743 BUGPRINT("Standard target size too big\n");
745 goto cleanup_watchers;
747 if (((struct ebt_standard_target *)t)->verdict <
748 -NUM_STANDARD_TARGETS {
749 BUGPRINT("Invalid standard target\n");
751 goto cleanup_watchers;
753 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
754 module_put(t->u.target->me);
756 goto cleanup_watchers;
759 tgpar.target = target;
760 tgpar.targinfo = t->data;
761 ret = xt_check_target(&tgpar, t->target_size,
762 e->ethproto, e->invflags & EBT_IPROTO);
764 module_put(target->me);
765 goto cleanup_watchers;
/* error unwind: i and j hold how many matches/watchers were checked */
770 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
772 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
777 * checks for loops and sets the hook mask for udc
778 * the hook mask for udc tells us from which base chains the udc can be
779 * accessed. This mask is a parameter to the check() functions of the extensions
/* Iterative depth-first walk from one base chain, following standard-
 * target jumps into user-defined chains. cl_s acts as an explicit call
 * stack (cs.n != 0 marks a chain currently on the stack, i.e. a loop),
 * and each reached udc gets hooknr or'ed into its hookmask.
 * NOTE(review): several error-return and continue lines are elided
 * from this listing. */
781 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
782 unsigned int udc_cnt, unsigned int hooknr, char *base)
784 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
785 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
786 const struct ebt_entry_target *t;
788 while (pos < nentries || chain_nr != -1) {
789 /* end of udc, go back one 'recursion' step */
790 if (pos == nentries) {
791 /* put back values of the time when this chain was called */
792 e = cl_s[chain_nr].cs.e;
793 if (cl_s[chain_nr].from != -1)
795 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
797 nentries = chain->nentries;
798 pos = cl_s[chain_nr].cs.n;
799 /* make sure we won't see a loop that isn't one */
800 cl_s[chain_nr].cs.n = 0;
801 chain_nr = cl_s[chain_nr].from;
805 t = (struct ebt_entry_target *)
806 (((char *)e) + e->target_offset);
/* non-standard targets cannot jump; skip them */
807 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
809 if (e->target_offset + sizeof(struct ebt_standard_target) >
811 BUGPRINT("Standard target size too big\n");
814 verdict = ((struct ebt_standard_target *)t)->verdict;
815 if (verdict >= 0) { /* jump to another chain */
816 struct ebt_entries *hlp2 =
817 (struct ebt_entries *)(base + verdict);
818 for (i = 0; i < udc_cnt; i++)
819 if (hlp2 == cl_s[i].cs.chaininfo)
821 /* bad destination or loop */
823 BUGPRINT("bad destination\n");
/* a chain already on the stack means a cycle */
830 if (cl_s[i].hookmask & (1 << hooknr))
832 /* this can't be 0, so the loop test is correct */
833 cl_s[i].cs.n = pos + 1;
835 cl_s[i].cs.e = ebt_next_entry(e);
836 e = (struct ebt_entry *)(hlp2->data);
837 nentries = hlp2->nentries;
838 cl_s[i].from = chain_nr;
840 /* this udc is accessible from the base chain for hooknr */
841 cl_s[i].hookmask |= (1 << hooknr);
845 e = ebt_next_entry(e);
851 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
/* Validate and finalise a whole table image that already passed
 * ebt_verify_pointers(): check hook chain ordering, run the size/hook
 * pass, allocate the per-cpu chainstacks and the udc position array,
 * run loop detection from each base chain, then deep-check every
 * entry (cleaning up on failure).
 * NOTE(review): error-return lines and the final cleanup/free lines
 * are elided from this listing. */
852 static int translate_table(struct net *net, const char *name,
853 struct ebt_table_info *newinfo)
855 unsigned int i, j, k, udc_cnt;
857 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
/* find the first valid hook; there must be at least one */
860 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
862 if (i == NF_BR_NUMHOOKS) {
863 BUGPRINT("No valid hooks specified\n");
866 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
867 BUGPRINT("Chains don't start at beginning\n");
870 /* make sure chains are ordered after each other in same order
871 as their corresponding hooks */
872 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
873 if (!newinfo->hook_entry[j])
875 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
876 BUGPRINT("Hook order must be followed\n");
882 /* do some early checkings and initialize some things */
883 i = 0; /* holds the expected nr. of entries for the chain */
884 j = 0; /* holds the up to now counted entries for the chain */
885 k = 0; /* holds the total nr. of entries, should equal
886 newinfo->nentries afterwards */
887 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
888 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
889 ebt_check_entry_size_and_hooks, newinfo,
890 &i, &j, &k, &udc_cnt);
896 BUGPRINT("nentries does not equal the nr of entries in the "
900 if (k != newinfo->nentries) {
901 BUGPRINT("Total nentries is wrong\n");
905 /* get the location of the udc, put them in an array
906 while we're at it, allocate the chainstack */
908 /* this will get free'd in do_replace()/ebt_register_table()
909 if an error occurs */
910 newinfo->chainstack =
911 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)))
912 if (!newinfo->chainstack)
914 for_each_possible_cpu(i) {
915 newinfo->chainstack[i] =
916 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
/* unwind partially-allocated per-cpu stacks on failure */
917 if (!newinfo->chainstack[i]) {
919 vfree(newinfo->chainstack[--i]);
920 vfree(newinfo->chainstack);
921 newinfo->chainstack = NULL;
926 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
929 i = 0; /* the i'th udc */
930 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
931 ebt_get_udc_positions, newinfo, &i, cl_s);
934 BUGPRINT("i != udc_cnt\n");
940 /* Check for loops */
941 for (i = 0; i < NF_BR_NUMHOOKS; i++)
942 if (newinfo->hook_entry[i])
943 if (check_chainloops(newinfo->hook_entry[i],
944 cl_s, udc_cnt, i, newinfo->entries)) {
949 /* we now know the following (along with E=mc²):
950 - the nr of entries in each chain is right
951 - the size of the allocated space is right
952 - all valid hooks have a corresponding chain
954 - wrong data can still be on the level of a single entry
955 - could be there are jumps to places that are not the
956 beginning of a chain. This can only occur in chains that
957 are not accessible from any base chains, so we don't care. */
959 /* used to know what we need to clean up if something goes wrong */
961 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
962 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
964 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
965 ebt_cleanup_entry, net, &i);
971 /* called under write_lock */
/* Collapse the per-cpu counter sets into one flat array: start from a
 * copy of cpu 0's set, then add in each other cpu's set (the loop that
 * skips cpu 0 has its continue elided in this listing). */
972 static void get_counters(const struct ebt_counter *oldcounters,
973 struct ebt_counter *counters, unsigned int nentries)
976 struct ebt_counter *counter_base;
978 /* counters of cpu 0 */
979 memcpy(counters, oldcounters,
980 sizeof(struct ebt_counter) * nentries);
982 /* add other counters to those of cpu 0 */
983 for_each_possible_cpu(cpu) {
986 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
987 for (i = 0; i < nentries; i++) {
988 counters[i].pcnt += counter_base[i].pcnt;
989 counters[i].bcnt += counter_base[i].bcnt;
/* Second half of a table replacement: translate the new image, swap it
 * in under the table's write lock (snapshotting the old counters for
 * the user first), then free the old table's entries and chainstacks.
 * On failure, frees everything belonging to newinfo instead.
 * NOTE(review): error-return lines, some labels and the tail of the
 * function are elided from this listing. */
994 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
995 struct ebt_table_info *newinfo)
998 struct ebt_counter *counterstmp = NULL;
999 /* used to be able to unlock earlier */
1000 struct ebt_table_info *table;
1001 struct ebt_table *t;
1003 /* the user wants counters back
1004 the check on the size is done later, when we have the lock */
1005 if (repl->num_counters) {
1006 unsigned long size = repl->num_counters * sizeof(*counterstmp);
1007 counterstmp = vmalloc(size);
1012 newinfo->chainstack = NULL;
1013 ret = ebt_verify_pointers(repl, newinfo);
1015 goto free_counterstmp;
1017 ret = translate_table(net, repl->name, newinfo);
1020 goto free_counterstmp;
1022 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1028 /* the table doesn't like it */
1029 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1032 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1033 BUGPRINT("Wrong nr. of counters requested\n");
1038 /* we have the mutex lock, so no danger in reading this pointer */
1040 /* make sure the table can only be rmmod'ed if it contains no rules */
1041 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1044 } else if (table->nentries && !newinfo->nentries)
1046 /* we need an atomic snapshot of the counters */
1047 write_lock_bh(&t->lock);
1048 if (repl->num_counters)
1049 get_counters(t->private->counters, counterstmp,
1050 t->private->nentries);
/* the actual swap: readers see either the old or the new table */
1052 t->private = newinfo;
1053 write_unlock_bh(&t->lock);
1054 mutex_unlock(&ebt_mutex);
1055 /* so, a user can change the chains while having messed up her counter
1056 allocation. Only reason why this is done is because this way the lock
1057 is held only once, while this doesn't bring the kernel into a
1059 if (repl->num_counters &&
1060 copy_to_user(repl->counters, counterstmp,
1061 repl->num_counters * sizeof(struct ebt_counter))) {
1062 /* Silent error, can't fail, new table is already in place */
1063 net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1066 /* decrease module count and free resources */
1067 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1068 ebt_cleanup_entry, net, NULL);
1070 vfree(table->entries);
1071 if (table->chainstack) {
1072 for_each_possible_cpu(i)
1073 vfree(table->chainstack[i]);
1074 vfree(table->chainstack);
/* on success: emit an audit record for the configuration change */
1081 if (audit_enabled) {
1082 struct audit_buffer *ab;
1084 ab = audit_log_start(current->audit_context, GFP_KERNEL,
1085 AUDIT_NETFILTER_CFG);
1087 audit_log_format(ab, "table=%s family=%u entries=%u",
1088 repl->name, AF_BRIDGE, repl->nentries);
/* error path: unwind newinfo's entries and chainstacks */
1096 mutex_unlock(&ebt_mutex);
1098 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1099 ebt_cleanup_entry, net, NULL);
1102 /* can be initialized in translate_table() */
1103 if (newinfo->chainstack) {
1104 for_each_possible_cpu(i)
1105 vfree(newinfo->chainstack[i]);
1106 vfree(newinfo->chainstack);
1111 /* replace the table */
/* setsockopt entry point for EBT_SO_SET_ENTRIES: copy the ebt_replace
 * header and entry blob from userspace (with length/overflow sanity
 * checks on untrusted sizes) into a fresh ebt_table_info, then hand
 * off to do_replace_finish().
 * NOTE(review): error-return lines and the final frees are elided
 * from this listing. */
1112 static int do_replace(struct net *net, const void __user *user,
1115 int ret, countersize;
1116 struct ebt_table_info *newinfo;
1117 struct ebt_replace tmp;
1119 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1122 if (len != sizeof(tmp) + tmp.entries_size) {
1123 BUGPRINT("Wrong len argument\n");
1127 if (tmp.entries_size == 0) {
1128 BUGPRINT("Entries_size never zero\n");
1131 /* overflow check */
1132 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1133 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1135 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
/* force NUL termination on the user-supplied table name */
1138 tmp.name[sizeof(tmp.name) - 1] = 0;
1140 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1141 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1146 memset(newinfo->counters, 0, countersize);
1148 newinfo->entries = vmalloc(tmp.entries_size);
1149 if (!newinfo->entries) {
1154 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1155 BUGPRINT("Couldn't copy entries from userspace\n");
1160 ret = do_replace_finish(net, &tmp, newinfo);
1164 vfree(newinfo->entries);
/* Register a kernel-supplied table (from an ebtable_* module): copy
 * the table descriptor and its initial entries, translate/validate
 * them, then link the table into this netns' list under ebt_mutex,
 * rejecting duplicate names. Returns the table or ERR_PTR() on error.
 * NOTE(review): error-return lines, some labels and the success
 * return are elided from this listing. */
1171 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1173 struct ebt_table_info *newinfo;
1174 struct ebt_table *t, *table;
1175 struct ebt_replace_kernel *repl;
1176 int ret, i, countersize;
1179 if (input_table == NULL || (repl = input_table->table) == NULL ||
1180 repl->entries == NULL || repl->entries_size == 0 ||
1181 repl->counters != NULL || input_table->private != NULL) {
1182 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1183 return ERR_PTR(-EINVAL);
1186 /* Don't add one table to multiple lists. */
1187 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1193 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1194 newinfo = vmalloc(sizeof(*newinfo) + countersize);
/* work on a private copy of the module's initial entries */
1199 p = vmalloc(repl->entries_size);
1203 memcpy(p, repl->entries, repl->entries_size);
1204 newinfo->entries = p;
1206 newinfo->entries_size = repl->entries_size;
1207 newinfo->nentries = repl->nentries;
1210 memset(newinfo->counters, 0, countersize);
1212 /* fill in newinfo and parse the entries */
1213 newinfo->chainstack = NULL;
1214 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1215 if ((repl->valid_hooks & (1 << i)) == 0)
1216 newinfo->hook_entry[i] = NULL;
/* rebase the hook pointer from the module's buffer into our copy */
1218 newinfo->hook_entry[i] = p +
1219 ((char *)repl->hook_entry[i] - repl->entries);
1221 ret = translate_table(net, repl->name, newinfo);
1223 BUGPRINT("Translate_table failed\n");
1224 goto free_chainstack;
1227 if (table->check && table->check(newinfo, table->valid_hooks)) {
1228 BUGPRINT("The table doesn't like its own initial data, lol\n");
1230 goto free_chainstack;
1233 table->private = newinfo;
1234 rwlock_init(&table->lock);
1235 mutex_lock(&ebt_mutex);
1236 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1237 if (strcmp(t->name, table->name) == 0) {
1239 BUGPRINT("Table name already exists\n");
1244 /* Hold a reference count if the chains aren't empty */
1245 if (newinfo->nentries && !try_module_get(table->me)) {
1249 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1250 mutex_unlock(&ebt_mutex);
/* error unwind: drop the mutex, then the chainstacks/entries */
1253 mutex_unlock(&ebt_mutex);
1255 if (newinfo->chainstack) {
1256 for_each_possible_cpu(i)
1257 vfree(newinfo->chainstack[i]);
1258 vfree(newinfo->chainstack);
1260 vfree(newinfo->entries);
1266 return ERR_PTR(ret);
/* Unlink a table from the netns list and free everything it owns:
 * per-entry extensions, the entries blob, per-cpu chainstacks and
 * the private info itself. Drops the module ref held for non-empty
 * tables. NOTE(review): the NULL-table guard's return and the final
 * kfree of the table struct are elided from this listing. */
1269 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1274 BUGPRINT("Request to unregister NULL table!!!\n");
1277 mutex_lock(&ebt_mutex);
1278 list_del(&table->list);
1279 mutex_unlock(&ebt_mutex);
1280 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1281 ebt_cleanup_entry, net, NULL);
1282 if (table->private->nentries)
1283 module_put(table->me);
1284 vfree(table->private->entries);
1285 if (table->private->chainstack) {
1286 for_each_possible_cpu(i)
1287 vfree(table->private->chainstack[i]);
1288 vfree(table->private->chainstack);
1290 vfree(table->private);
1294 /* userspace just supplied us with counters */
/* Add user-supplied counter deltas to the table's counters: copy the
 * user array into a temporary buffer, verify the count matches the
 * table's nentries, then add them to cpu 0's counter set under the
 * table's write lock.
 * NOTE(review): error-return lines and frees are elided from this
 * listing. */
1295 static int do_update_counters(struct net *net, const char *name,
1296 struct ebt_counter __user *counters,
1297 unsigned int num_counters,
1298 const void __user *user, unsigned int len)
1301 struct ebt_counter *tmp;
1302 struct ebt_table *t;
1304 if (num_counters == 0)
1307 tmp = vmalloc(num_counters * sizeof(*tmp));
1311 t = find_table_lock(net, name, &ret, &ebt_mutex);
1315 if (num_counters != t->private->nentries) {
1316 BUGPRINT("Wrong nr of counters\n");
1321 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1326 /* we want an atomic add of the counters */
1327 write_lock_bh(&t->lock);
1329 /* we add to the counters of the first cpu */
1330 for (i = 0; i < num_counters; i++) {
1331 t->private->counters[i].pcnt += tmp[i].pcnt;
1332 t->private->counters[i].bcnt += tmp[i].bcnt;
1335 write_unlock_bh(&t->lock);
1338 mutex_unlock(&ebt_mutex);
/* setsockopt wrapper for EBT_SO_SET_COUNTERS: copy the ebt_replace
 * header, sanity-check len against num_counters, and delegate to
 * do_update_counters(). */
1344 static int update_counters(struct net *net, const void __user *user,
1347 struct ebt_replace hlp;
1349 if (copy_from_user(&hlp, user, sizeof(hlp)))
1352 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1355 return do_update_counters(net, hlp.name, hlp.counters,
1356 hlp.num_counters, user, len);
/* When copying a rule blob back to userspace, overwrite this match's
 * name field (at the same relative offset in ubase as m has in base)
 * with the kernel match's canonical name, zero-padded to the full
 * 32-byte ebtables name width. */
1359 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1360 const char *base, char __user *ubase)
1362 char __user *hlp = ubase + ((char *)m - base);
1363 char name[EBT_FUNCTION_MAXNAMELEN] = {};
1365 /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1366 long. Copy 29 bytes and fill remaining bytes with zeroes. */
1367 strlcpy(name, m->u.match->name, sizeof(name));
1368 if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
/* Same as ebt_make_matchname() but for a watcher: write the watcher's
 * canonical, zero-padded name back into the user-visible blob. */
1373 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1374 const char *base, char __user *ubase)
1376 char __user *hlp = ubase + ((char *)w - base);
1377 char name[EBT_FUNCTION_MAXNAMELEN] = {};
1379 strlcpy(name, w->u.watcher->name, sizeof(name));
1380 if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
/*
 * ebt_make_names() - per-entry pass that rewrites the match, watcher and
 * target names in the userspace copy so they carry the registered xt names.
 * Entries with bitmask == 0 are chain headers (struct ebt_entries) and are
 * skipped.
 */
1386 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1390 const struct ebt_entry_target *t;
1391 char name[EBT_FUNCTION_MAXNAMELEN] = {};
1393 if (e->bitmask == 0)
/* hlp points at the target's name field inside the user buffer */
1396 hlp = ubase + (((char *)e + e->target_offset) - base);
1397 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1399 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1402 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1405 strlcpy(name, t->u.target->name, sizeof(name));
1406 if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
/*
 * copy_counters_to_user() - snapshot the per-cpu counters into a temporary
 * buffer (under write_lock_bh so the totals are consistent) and copy that
 * snapshot out to userspace.  num_counters == 0 means userspace does not
 * want counters; any other value must equal nentries.
 */
1411 static int copy_counters_to_user(struct ebt_table *t,
1412 const struct ebt_counter *oldcounters,
1413 void __user *user, unsigned int num_counters,
1414 unsigned int nentries)
1416 struct ebt_counter *counterstmp;
1419 /* userspace might not need the counters */
1420 if (num_counters == 0)
1423 if (num_counters != nentries) {
1424 BUGPRINT("Num_counters wrong\n");
1428 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
/* lock out concurrent counter updates while summing per-cpu values */
1432 write_lock_bh(&t->lock);
1433 get_counters(oldcounters, counterstmp, nentries);
1434 write_unlock_bh(&t->lock);
1436 if (copy_to_user(user, counterstmp,
1437 nentries * sizeof(struct ebt_counter)))
1443 /* called with ebt_mutex locked */
/*
 * copy_everything_to_user() - GET_ENTRIES / GET_INIT_ENTRIES handler.
 * GET_ENTRIES reads the live ruleset (t->private); GET_INIT_ENTRIES reads
 * the pristine registration-time ruleset (t->table).  After copying the raw
 * entry blob out, a final iterate pass fixes up the m/w/t names in the
 * userspace copy.
 */
1444 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1445 const int *len, int cmd)
1447 struct ebt_replace tmp;
1448 const struct ebt_counter *oldcounters;
1449 unsigned int entries_size, nentries;
1453 if (cmd == EBT_SO_GET_ENTRIES) {
1454 entries_size = t->private->entries_size;
1455 nentries = t->private->nentries;
1456 entries = t->private->entries;
1457 oldcounters = t->private->counters;
1459 entries_size = t->table->entries_size;
1460 nentries = t->table->nentries;
1461 entries = t->table->entries;
1462 oldcounters = t->table->counters;
1465 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* *len must cover header + entries + (optional) counter array */
1468 if (*len != sizeof(struct ebt_replace) + entries_size +
1469 (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1472 if (tmp.nentries != nentries) {
1473 BUGPRINT("Nentries wrong\n");
1477 if (tmp.entries_size != entries_size) {
1478 BUGPRINT("Wrong size\n");
1482 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1483 tmp.num_counters, nentries);
1487 if (copy_to_user(tmp.entries, entries, entries_size)) {
1488 BUGPRINT("Couldn't copy entries to userspace\n");
1491 /* set the match/watcher/target names right */
1492 return EBT_ENTRY_ITERATE(entries, entries_size,
1493 ebt_make_names, entries, tmp.entries);
/*
 * do_ebt_set_ctl() - setsockopt dispatcher for the native ABI.
 * Requires CAP_NET_ADMIN in the socket's user namespace, then routes
 * EBT_SO_SET_ENTRIES to do_replace() and EBT_SO_SET_COUNTERS to
 * update_counters().
 */
1496 static int do_ebt_set_ctl(struct sock *sk,
1497 int cmd, void __user *user, unsigned int len)
1500 struct net *net = sock_net(sk);
1502 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1506 case EBT_SO_SET_ENTRIES:
1507 ret = do_replace(net, user, len);
1509 case EBT_SO_SET_COUNTERS:
1510 ret = update_counters(net, user, len);
/*
 * do_ebt_get_ctl() - getsockopt dispatcher for the native ABI.
 * INFO/INIT_INFO return the ebt_replace header (live vs. pristine table);
 * ENTRIES/INIT_ENTRIES hand off to copy_everything_to_user().  ebt_mutex is
 * taken by find_table_lock() and released on every visible exit path.
 */
1518 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1521 struct ebt_replace tmp;
1522 struct ebt_table *t;
1523 struct net *net = sock_net(sk);
1525 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1528 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* defensively NUL-terminate the user-supplied table name */
1531 tmp.name[sizeof(tmp.name) - 1] = '\0';
1533 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1538 case EBT_SO_GET_INFO:
1539 case EBT_SO_GET_INIT_INFO:
1540 if (*len != sizeof(struct ebt_replace)) {
1542 mutex_unlock(&ebt_mutex);
/* INFO = live ruleset, INIT_INFO = table as registered */
1545 if (cmd == EBT_SO_GET_INFO) {
1546 tmp.nentries = t->private->nentries;
1547 tmp.entries_size = t->private->entries_size;
1548 tmp.valid_hooks = t->valid_hooks;
1550 tmp.nentries = t->table->nentries;
1551 tmp.entries_size = t->table->entries_size;
1552 tmp.valid_hooks = t->table->valid_hooks;
1554 mutex_unlock(&ebt_mutex);
1555 if (copy_to_user(user, &tmp, *len) != 0) {
1556 BUGPRINT("c2u Didn't work\n");
1563 case EBT_SO_GET_ENTRIES:
1564 case EBT_SO_GET_INIT_ENTRIES:
1565 ret = copy_everything_to_user(t, user, len, cmd);
1566 mutex_unlock(&ebt_mutex);
1570 mutex_unlock(&ebt_mutex);
1577 #ifdef CONFIG_COMPAT
1578 /* 32 bit-userspace compatibility definitions. */
/*
 * 32-bit layout of struct ebt_replace: pointers become compat_uptr_t and
 * ints become compat_uint_t so a 32-bit ebtables binary can talk to a
 * 64-bit kernel.  Field order mirrors the native structure.
 */
1579 struct compat_ebt_replace {
1580 char name[EBT_TABLE_MAXNAMELEN];
1581 compat_uint_t valid_hooks;
1582 compat_uint_t nentries;
1583 compat_uint_t entries_size;
1584 /* start of the chains */
1585 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1586 /* nr of counters userspace expects back */
1587 compat_uint_t num_counters;
1588 /* where the kernel will put the old counters. */
1589 compat_uptr_t counters;
1590 compat_uptr_t entries;
1593 /* struct ebt_entry_match, _target and _watcher have same layout */
/* single compat shape reused for matches, watchers and targets */
1594 struct compat_ebt_entry_mwt {
1595 char name[EBT_FUNCTION_MAXNAMELEN];
1599 compat_uint_t match_size;
/* flexible payload: the extension's own (compat) data follows */
1600 compat_uint_t data[0];
1603 /* account for possible padding between match_size and ->data */
/*
 * ebt_compat_entry_padsize() - size difference between the aligned native
 * m/w/t header and the aligned compat header; the BUILD_BUG_ON guarantees
 * the result is never negative.
 */
1604 static int ebt_compat_entry_padsize(void)
1606 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1607 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1608 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1609 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/*
 * ebt_compat_match_offset() - native-vs-compat size delta for a match.
 * Normally delegates to xt_compat_match_offset(); ebt_among is special
 * (matchsize == -1), so the delta is computed from the user-supplied length.
 */
1612 static int ebt_compat_match_offset(const struct xt_match *match,
1613 unsigned int userlen)
1616 * ebt_among needs special handling. The kernel .matchsize is
1617 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1618 * value is expected.
1619 * Example: userspace sends 4500, ebt_among.c wants 4504.
1621 if (unlikely(match->matchsize == -1))
1622 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1623 return xt_compat_match_offset(match);
/*
 * compat_match_to_user() - emit one match into the 32-bit user buffer:
 * name + shrunken match_size header, then the payload (via the extension's
 * compat_to_user hook when present, raw copy otherwise).  *dstptr and *size
 * are advanced/reduced accordingly (advance lines elided in this listing).
 */
1626 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1629 const struct xt_match *match = m->u.match;
1630 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1631 int off = ebt_compat_match_offset(match, m->match_size);
/* compat size is the native size minus the layout delta */
1632 compat_uint_t msize = m->match_size - off;
1634 if (WARN_ON(off >= m->match_size))
1637 if (copy_to_user(cm->u.name, match->name,
1638 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1641 if (match->compat_to_user) {
1642 if (match->compat_to_user(cm->data, m->data))
1644 } else if (copy_to_user(cm->data, m->data, msize))
1647 *size -= ebt_compat_entry_padsize() + off;
/*
 * compat_target_to_user() - same conversion as compat_match_to_user() but
 * for a target (and, via compat_watcher_to_user(), for watchers): write
 * name + compat size, then the payload via compat_to_user or raw copy.
 */
1653 static int compat_target_to_user(struct ebt_entry_target *t,
1654 void __user **dstptr,
1657 const struct xt_target *target = t->u.target;
1658 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1659 int off = xt_compat_target_offset(target);
1660 compat_uint_t tsize = t->target_size - off;
1662 if (WARN_ON(off >= t->target_size))
1665 if (copy_to_user(cm->u.name, target->name,
1666 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1669 if (target->compat_to_user) {
1670 if (target->compat_to_user(cm->data, t->data))
1672 } else if (copy_to_user(cm->data, t->data, tsize))
1675 *size -= ebt_compat_entry_padsize() + off;
/* watchers share the target layout, so reuse compat_target_to_user() */
1681 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1682 void __user **dstptr,
1685 return compat_target_to_user((struct ebt_entry_target *)w,
/*
 * compat_copy_entry_to_user() - convert one kernel ebt_entry (or chain
 * header when bitmask == 0) into the 32-bit user buffer.  The entry's
 * watchers/target/next offsets must shrink by however much the converted
 * matches/watchers shrank, which is tracked via (origsize - *size).
 */
1689 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1692 struct ebt_entry_target *t;
1693 struct ebt_entry __user *ce;
1694 u32 watchers_offset, target_offset, next_offset;
1695 compat_uint_t origsize;
/* bitmask == 0: this is a struct ebt_entries chain header, copy verbatim */
1698 if (e->bitmask == 0) {
1699 if (*size < sizeof(struct ebt_entries))
1701 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1704 *dstptr += sizeof(struct ebt_entries);
1705 *size -= sizeof(struct ebt_entries);
1709 if (*size < sizeof(*ce))
1712 ce = (struct ebt_entry __user *)*dstptr;
1713 if (copy_to_user(ce, e, sizeof(*ce)))
1717 *dstptr += sizeof(*ce);
1719 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
/* adjust each offset by the shrinkage accumulated so far */
1722 watchers_offset = e->watchers_offset - (origsize - *size);
1724 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1727 target_offset = e->target_offset - (origsize - *size);
1729 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1731 ret = compat_target_to_user(t, dstptr, size);
1734 next_offset = e->next_offset - (origsize - *size);
1736 if (put_user(watchers_offset, &ce->watchers_offset) ||
1737 put_user(target_offset, &ce->target_offset) ||
1738 put_user(next_offset, &ce->next_offset))
1741 *size -= sizeof(*ce);
/* accumulate this match's native/compat size delta plus header padding */
1745 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1747 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1748 *off += ebt_compat_entry_padsize();
/* accumulate this watcher's size delta; watchers use target offsets */
1752 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1754 *off += xt_compat_target_offset(w->u.watcher);
1755 *off += ebt_compat_entry_padsize();
/*
 * compat_calc_entry() - per-entry sizing pass for the 64->32 direction:
 * total up how much smaller this entry is in compat layout, register the
 * (entry_offset, off) pair with xt_compat_add_offset(), shrink the reported
 * entries_size and pull every hook entry that lies after this entry back
 * by the same amount.
 */
1759 static int compat_calc_entry(const struct ebt_entry *e,
1760 const struct ebt_table_info *info,
1762 struct compat_ebt_replace *newinfo)
1764 const struct ebt_entry_target *t;
1765 unsigned int entry_offset;
/* chain headers (bitmask == 0) have identical native/compat size */
1768 if (e->bitmask == 0)
1772 entry_offset = (void *)e - base;
1774 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1775 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1777 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1779 off += xt_compat_target_offset(t->u.target);
1780 off += ebt_compat_entry_padsize();
1782 newinfo->entries_size -= off;
1784 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1788 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1789 const void *hookptr = info->hook_entry[i];
1790 if (info->hook_entry[i] &&
1791 (e < (struct ebt_entry *)(base - hookptr))) {
1792 newinfo->hook_entry[i] -= off;
1793 pr_debug("0x%08X -> 0x%08X\n",
1794 newinfo->hook_entry[i] + off,
1795 newinfo->hook_entry[i]);
/*
 * compat_table_info() - fill a compat_ebt_replace header from a native
 * table: start with the native entries_size and let the per-entry sizing
 * pass (compat_calc_entry) shrink it and adjust the hook offsets.
 */
1803 static int compat_table_info(const struct ebt_table_info *info,
1804 struct compat_ebt_replace *newinfo)
1806 unsigned int size = info->entries_size;
1807 const void *entries = info->entries;
1809 newinfo->entries_size = size;
1811 xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1812 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/*
 * compat_copy_everything_to_user() - GET_ENTRIES / GET_INIT_ENTRIES for a
 * 32-bit caller.  Snapshots the relevant table view into tinfo, computes
 * the compat entries_size via compat_table_info(), validates the caller's
 * buffer length, copies counters, then streams the converted entries out.
 */
1816 static int compat_copy_everything_to_user(struct ebt_table *t,
1817 void __user *user, int *len, int cmd)
1819 struct compat_ebt_replace repl, tmp;
1820 struct ebt_counter *oldcounters;
1821 struct ebt_table_info tinfo;
1825 memset(&tinfo, 0, sizeof(tinfo));
/* ENTRIES = live ruleset, INIT_ENTRIES = table as registered */
1827 if (cmd == EBT_SO_GET_ENTRIES) {
1828 tinfo.entries_size = t->private->entries_size;
1829 tinfo.nentries = t->private->nentries;
1830 tinfo.entries = t->private->entries;
1831 oldcounters = t->private->counters;
1833 tinfo.entries_size = t->table->entries_size;
1834 tinfo.nentries = t->table->nentries;
1835 tinfo.entries = t->table->entries;
1836 oldcounters = t->table->counters;
1839 if (copy_from_user(&tmp, user, sizeof(tmp)))
1842 if (tmp.nentries != tinfo.nentries ||
1843 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1846 memcpy(&repl, &tmp, sizeof(repl));
1847 if (cmd == EBT_SO_GET_ENTRIES)
1848 ret = compat_table_info(t->private, &repl);
1850 ret = compat_table_info(&tinfo, &repl);
1854 if (*len != sizeof(tmp) + repl.entries_size +
1855 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1856 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1857 *len, tinfo.entries_size, repl.entries_size);
1861 /* userspace might not need the counters */
1862 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1863 tmp.num_counters, tinfo.nentries);
1867 pos = compat_ptr(tmp.entries);
1868 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1869 compat_copy_entry_to_user, &pos, &tmp.entries_size);
/*
 * Cursor state shared by the two compat_copy_entries() passes: pass one
 * runs with buf_kern_start == NULL purely to size the 64-bit buffer; pass
 * two writes translated data into it.
 */
1872 struct ebt_entries_buf_state {
1873 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1874 u32 buf_kern_len; /* total size of kernel buffer */
1875 u32 buf_kern_offset; /* amount of data copied so far */
1876 u32 buf_user_offset; /* read position in userspace buffer */
/* advance the kernel write offset by sz; -EINVAL flags u32 wrap-around */
1879 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1881 state->buf_kern_offset += sz;
1882 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/*
 * ebt_buf_add() - append sz bytes of data to the kernel buffer; in the
 * sizing pass (buf_kern_start == NULL) only the offsets are advanced.
 */
1885 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1886 const void *data, unsigned int sz)
1888 if (state->buf_kern_start == NULL)
1891 if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len))
1894 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
/* source data was also consumed, so both cursors move */
1897 state->buf_user_offset += sz;
1898 return ebt_buf_count(state, sz);
/* append sz zero bytes of kernel-only padding (user cursor untouched) */
1901 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1903 char *b = state->buf_kern_start;
1905 if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len))
1908 if (b != NULL && sz > 0)
1909 memset(b + state->buf_kern_offset, 0, sz);
1910 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1911 return ebt_buf_count(state, sz);
/*
 * compat_mtw_from_user() - translate one 32-bit match/watcher/target
 * payload into the 64-bit kernel buffer.  Looks up the xt extension by
 * name, converts the payload (compat_from_user hook or raw memcpy),
 * zero-fills alignment padding after the kernel-sized data, and returns
 * the number of source bytes consumed plus the growth (off).
 * NOTE(review): sampled listing — the lookup-failure and size-validation
 * lines between the visible statements are elided here.
 */
1920 static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
1921 enum compat_mwt compat_mwt,
1922 struct ebt_entries_buf_state *state,
1923 const unsigned char *base)
1925 char name[EBT_FUNCTION_MAXNAMELEN];
1926 struct xt_match *match;
1927 struct xt_target *wt;
1930 unsigned int size_kern, match_size = mwt->match_size;
/* reject unterminated/overlong extension names from userspace */
1932 if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
1935 if (state->buf_kern_start)
1936 dst = state->buf_kern_start + state->buf_kern_offset;
1938 switch (compat_mwt) {
1939 case EBT_COMPAT_MATCH:
1940 match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
1942 return PTR_ERR(match);
1944 off = ebt_compat_match_offset(match, match_size);
1946 if (match->compat_from_user)
1947 match->compat_from_user(dst, mwt->data);
1949 memcpy(dst, mwt->data, match_size);
1952 size_kern = match->matchsize;
/* ebt_among registers matchsize == -1; use the user-supplied size */
1953 if (unlikely(size_kern == -1))
1954 size_kern = match_size;
1955 module_put(match->me);
1957 case EBT_COMPAT_WATCHER: /* fallthrough */
1958 case EBT_COMPAT_TARGET:
1959 wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
1962 off = xt_compat_target_offset(wt);
1965 if (wt->compat_from_user)
1966 wt->compat_from_user(dst, mwt->data);
1968 memcpy(dst, mwt->data, match_size);
1971 size_kern = wt->targetsize;
1979 state->buf_kern_offset += match_size + off;
1980 state->buf_user_offset += match_size;
1981 pad = XT_ALIGN(size_kern) - size_kern;
/* zero the alignment tail so no kernel memory leaks to the ruleset */
1983 if (pad > 0 && dst) {
1984 if (WARN_ON(state->buf_kern_len <= pad))
1986 if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad))
1988 memset(dst + size_kern, 0, pad);
1990 return off + match_size;
1994 * return size of all matches, watchers or target, including necessary
1995 * alignment and padding.
/*
 * ebt_size_mwt() - walk one run of compat m/w/t records: copy each header,
 * add native-layout padding, translate the payload via
 * compat_mtw_from_user(), and patch the kernel-side match_size to the
 * (possibly grown) native size.  size_left bounds all reads of the
 * user-derived records.
 */
1997 static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
1998 unsigned int size_left, enum compat_mwt type,
1999 struct ebt_entries_buf_state *state, const void *base)
2001 const char *buf = (const char *)match32;
2008 struct ebt_entry_match *match_kern;
2011 if (size_left < sizeof(*match32))
2014 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
/* in the write pass, point at where this record's header lands */
2017 tmp = state->buf_kern_start + state->buf_kern_offset;
2018 match_kern = (struct ebt_entry_match *) tmp;
2020 ret = ebt_buf_add(state, buf, sizeof(*match32));
2023 size_left -= sizeof(*match32);
2025 /* add padding before match->data (if any) */
2026 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
2030 if (match32->match_size > size_left)
2033 size_left -= match32->match_size;
2035 ret = compat_mtw_from_user(match32, type, state, base);
2039 if (WARN_ON(ret < match32->match_size))
/* growth = how much larger the native layout is than the compat one */
2041 growth += ret - match32->match_size;
2042 growth += ebt_compat_entry_padsize();
2044 buf += sizeof(*match32);
2045 buf += match32->match_size;
2048 match_kern->match_size = ret;
2050 match32 = (struct compat_ebt_entry_mwt *) buf;
2051 } while (size_left);
2056 /* called for all ebt_entry structures. */
/*
 * size_entry_mwt() - per-entry driver of the compat translation.  Copies
 * the fixed ebt_entry prefix, validates the four section offsets (matches,
 * watchers, target, next entry) are in-bounds and monotonic, converts each
 * section via ebt_size_mwt(), rewrites the offsets for the grown native
 * layout, and in the sizing pass records the per-entry delta with
 * xt_compat_add_offset().
 * NOTE(review): sampled listing — error returns between the visible checks
 * are elided here.
 */
2057 static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
2058 unsigned int *total,
2059 struct ebt_entries_buf_state *state)
2061 unsigned int i, j, startoff, next_expected_off, new_offset = 0;
2062 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2063 unsigned int offsets[4];
2064 unsigned int *offsets_update = NULL;
2068 if (*total < sizeof(struct ebt_entries))
/* bitmask == 0: chain header, copied through unchanged */
2071 if (!entry->bitmask) {
2072 *total -= sizeof(struct ebt_entries);
2073 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2075 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2078 startoff = state->buf_user_offset;
2079 /* pull in most part of ebt_entry, it does not need to be changed. */
2080 ret = ebt_buf_add(state, entry,
2081 offsetof(struct ebt_entry, watchers_offset));
2085 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2086 memcpy(&offsets[1], &entry->watchers_offset,
2087 sizeof(offsets) - sizeof(offsets[0]));
2089 if (state->buf_kern_start) {
/* remember where the three offset fields land so we can patch them */
2090 buf_start = state->buf_kern_start + state->buf_kern_offset;
2091 offsets_update = (unsigned int *) buf_start;
2093 ret = ebt_buf_add(state, &offsets[1],
2094 sizeof(offsets) - sizeof(offsets[0]));
2097 buf_start = (char *) entry;
2099 * 0: matches offset, always follows ebt_entry.
2100 * 1: watchers offset, from ebt_entry structure
2101 * 2: target offset, from ebt_entry structure
2102 * 3: next ebt_entry offset, from ebt_entry structure
2104 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
/* bounds + monotonicity validation of the user-controlled offsets */
2106 for (i = 0; i < 4 ; ++i) {
2107 if (offsets[i] > *total)
2110 if (i < 3 && offsets[i] == *total)
2115 if (offsets[i-1] > offsets[i])
/* translate the three sections: matches, watchers, target */
2119 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2120 struct compat_ebt_entry_mwt *match32;
2122 char *buf = buf_start;
2124 buf = buf_start + offsets[i];
2125 if (offsets[i] > offsets[j])
2128 match32 = (struct compat_ebt_entry_mwt *) buf;
2129 size = offsets[j] - offsets[i];
2130 ret = ebt_size_mwt(match32, size, i, state, base);
2134 if (offsets_update && new_offset) {
2135 pr_debug("change offset %d to %d\n",
2136 offsets_update[i], offsets[j] + new_offset);
2137 offsets_update[i] = offsets[j] + new_offset;
/* sizing pass: record this entry's compat->native growth */
2141 if (state->buf_kern_start == NULL) {
2142 unsigned int offset = buf_start - (char *) base;
2144 ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
/* the consumed bytes must match the entry's declared next_offset */
2149 next_expected_off = state->buf_user_offset - startoff;
2150 if (next_expected_off != entry->next_offset)
2153 if (*total < entry->next_offset)
2155 *total -= entry->next_offset;
2160 * repl->entries_size is the size of the ebt_entry blob in userspace.
2161 * It might need more memory when copied to a 64 bit kernel in case
2162 * userspace is 32-bit. So, first task: find out how much memory is needed.
2164 * Called before validation is performed.
/*
 * compat_copy_entries() - run the size_entry_mwt() pass over the whole
 * blob; returns the total kernel-side size accumulated in the state
 * (sizing pass) or the bytes written (copy pass).
 */
2166 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2167 struct ebt_entries_buf_state *state)
2169 unsigned int size_remaining = size_user;
2172 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2173 &size_remaining, state);
2180 return state->buf_kern_offset;
/*
 * compat_copy_ebt_replace_from_user() - read a compat_ebt_replace from
 * userspace, sanity-check its sizes against overflow, and widen it into a
 * native struct ebt_replace (pointers via compat_ptr()).
 */
2184 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2185 void __user *user, unsigned int len)
2187 struct compat_ebt_replace tmp;
2190 if (len < sizeof(tmp))
2193 if (copy_from_user(&tmp, user, sizeof(tmp)))
2196 if (len != sizeof(tmp) + tmp.entries_size)
2199 if (tmp.entries_size == 0)
/* overflow guards mirrored from the native do_replace() path */
2202 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2203 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2205 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
/* fields up to hook_entry have identical layout; copy them wholesale */
2208 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2210 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2211 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2212 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2214 repl->num_counters = tmp.num_counters;
2215 repl->counters = compat_ptr(tmp.counters);
2216 repl->entries = compat_ptr(tmp.entries);
/*
 * compat_do_replace() - EBT_SO_SET_ENTRIES for 32-bit callers.  Two-pass
 * scheme: copy the user blob, run compat_copy_entries() once to size the
 * 64-bit buffer, allocate it, run the pass again to translate, fix up the
 * hook pointers via xt_compat_calc_jump(), then finish through the native
 * do_replace_finish().  Runs under xt_compat_lock(NFPROTO_BRIDGE).
 * NOTE(review): sampled listing — the goto labels/cleanup lines between
 * the visible statements are elided here.
 */
2220 static int compat_do_replace(struct net *net, void __user *user,
2223 int ret, i, countersize, size64;
2224 struct ebt_table_info *newinfo;
2225 struct ebt_replace tmp;
2226 struct ebt_entries_buf_state state;
2229 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2231 /* try real handler in case userland supplied needed padding */
2232 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2237 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2238 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2243 memset(newinfo->counters, 0, countersize);
2245 memset(&state, 0, sizeof(state));
/* first buffer: raw 32-bit blob exactly as userspace sent it */
2247 newinfo->entries = vmalloc(tmp.entries_size);
2248 if (!newinfo->entries) {
2253 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2258 entries_tmp = newinfo->entries;
2260 xt_compat_lock(NFPROTO_BRIDGE);
2262 xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
/* pass 1 (buf_kern_start == NULL): compute required 64-bit size */
2263 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2267 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2268 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2269 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
/* second buffer: translated native-layout entries */
2272 newinfo->entries = vmalloc(size64);
2273 if (!newinfo->entries) {
2279 memset(&state, 0, sizeof(state));
2280 state.buf_kern_start = newinfo->entries;
2281 state.buf_kern_len = size64;
/* pass 2: actually translate into the sized buffer */
2283 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2284 if (WARN_ON(ret < 0)) {
2290 tmp.entries_size = size64;
/* shift each hook pointer by the accumulated compat->native growth */
2292 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2293 char __user *usrptr;
2294 if (tmp.hook_entry[i]) {
2296 usrptr = (char __user *) tmp.hook_entry[i];
2297 delta = usrptr - tmp.entries;
2298 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2299 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2303 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2304 xt_compat_unlock(NFPROTO_BRIDGE);
2306 ret = do_replace_finish(net, &tmp, newinfo);
2310 vfree(newinfo->entries);
2315 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2316 xt_compat_unlock(NFPROTO_BRIDGE);
/*
 * compat_update_counters() - EBT_SO_SET_COUNTERS for 32-bit callers;
 * validates len against the compat header, falling back to the native
 * handler when the length suggests native-sized padding was used.
 */
2320 static int compat_update_counters(struct net *net, void __user *user,
2323 struct compat_ebt_replace hlp;
2325 if (copy_from_user(&hlp, user, sizeof(hlp)))
2328 /* try real handler in case userland supplied needed padding */
2329 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2330 return update_counters(net, user, len);
2332 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2333 hlp.num_counters, user, len);
/*
 * compat_do_ebt_set_ctl() - setsockopt dispatcher for 32-bit callers;
 * mirrors do_ebt_set_ctl() but routes to the compat_* handlers.
 */
2336 static int compat_do_ebt_set_ctl(struct sock *sk,
2337 int cmd, void __user *user, unsigned int len)
2340 struct net *net = sock_net(sk);
2342 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2346 case EBT_SO_SET_ENTRIES:
2347 ret = compat_do_replace(net, user, len);
2349 case EBT_SO_SET_COUNTERS:
2350 ret = compat_update_counters(net, user, len);
/*
 * compat_do_ebt_get_ctl() - getsockopt dispatcher for 32-bit callers.
 * For INFO/INIT_INFO the compat entries_size is recomputed via
 * compat_table_info(); for ENTRIES the native handler is tried first (in
 * case userspace padded to native layout) before the compat conversion.
 * Runs under both xt_compat_lock(NFPROTO_BRIDGE) and ebt_mutex.
 */
2358 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2359 void __user *user, int *len)
2362 struct compat_ebt_replace tmp;
2363 struct ebt_table *t;
2364 struct net *net = sock_net(sk);
2366 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2369 /* try real handler in case userland supplied needed padding */
2370 if ((cmd == EBT_SO_GET_INFO ||
2371 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2372 return do_ebt_get_ctl(sk, cmd, user, len);
2374 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* defensively NUL-terminate the user-supplied table name */
2377 tmp.name[sizeof(tmp.name) - 1] = '\0';
2379 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2383 xt_compat_lock(NFPROTO_BRIDGE);
2385 case EBT_SO_GET_INFO:
2386 tmp.nentries = t->private->nentries;
2387 ret = compat_table_info(t->private, &tmp);
2390 tmp.valid_hooks = t->valid_hooks;
2392 if (copy_to_user(user, &tmp, *len) != 0) {
2398 case EBT_SO_GET_INIT_INFO:
2399 tmp.nentries = t->table->nentries;
2400 tmp.entries_size = t->table->entries_size;
2401 tmp.valid_hooks = t->table->valid_hooks;
2403 if (copy_to_user(user, &tmp, *len) != 0) {
2409 case EBT_SO_GET_ENTRIES:
2410 case EBT_SO_GET_INIT_ENTRIES:
2412 * try real handler first in case of userland-side padding.
2413 * in case we are dealing with an 'ordinary' 32 bit binary
2414 * without 64bit compatibility padding, this will fail right
2415 * after copy_from_user when the *len argument is validated.
2417 * the compat_ variant needs to do one pass over the kernel
2418 * data set to adjust for size differences before it the check.
2420 if (copy_everything_to_user(t, user, len, cmd) == 0)
2423 ret = compat_copy_everything_to_user(t, user, len, cmd);
2429 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2430 xt_compat_unlock(NFPROTO_BRIDGE);
2431 mutex_unlock(&ebt_mutex);
/*
 * Netfilter sockopt registration: native get/set handlers plus the compat
 * pair when CONFIG_COMPAT is enabled.
 */
2436 static struct nf_sockopt_ops ebt_sockopts = {
2438 .set_optmin = EBT_BASE_CTL,
2439 .set_optmax = EBT_SO_SET_MAX + 1,
2440 .set = do_ebt_set_ctl,
2441 #ifdef CONFIG_COMPAT
2442 .compat_set = compat_do_ebt_set_ctl,
2444 .get_optmin = EBT_BASE_CTL,
2445 .get_optmax = EBT_SO_GET_MAX + 1,
2446 .get = do_ebt_get_ctl,
2447 #ifdef CONFIG_COMPAT
2448 .compat_get = compat_do_ebt_get_ctl,
2450 .owner = THIS_MODULE,
/*
 * Module init: register the standard target with x_tables, then the
 * sockopt interface; unwinds the target registration if the latter fails.
 */
2453 static int __init ebtables_init(void)
2457 ret = xt_register_target(&ebt_standard_target);
2460 ret = nf_register_sockopt(&ebt_sockopts);
2462 xt_unregister_target(&ebt_standard_target);
2466 printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: unregister in reverse order of ebtables_init(). */
2470 static void __exit ebtables_fini(void)
2472 nf_unregister_sockopt(&ebt_sockopts);
2473 xt_unregister_target(&ebt_standard_target);
2474 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Public API consumed by the ebtable_filter/ebtable_nat/... table modules */
2477 EXPORT_SYMBOL(ebt_register_table);
2478 EXPORT_SYMBOL(ebt_unregister_table);
2479 EXPORT_SYMBOL(ebt_do_table);
2480 module_init(ebtables_init);
2481 module_exit(ebtables_fini);
2482 MODULE_LICENSE("GPL");