/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE 4096

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit userland */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

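/*
 * Illustrative sketch (not from this file; every "foo" name below is
 * hypothetical): an extension module registers its target from its
 * module init hook and drops it again on exit.
 *
 *	static struct xt_target foo_tg_reg __read_mostly = {
 *		.name       = "FOO",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.target     = foo_tg,		hypothetical eval function
 *		.targetsize = sizeof(struct xt_foo_info),
 *		.me         = THIS_MODULE,
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_target(&foo_tg_reg);
 *	}
 */
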
void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

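/*
 * Illustrative sketch (hypothetical "foo" match with two revisions):
 * the array helpers let one module register several extensions in one
 * call; on a mid-array failure xt_register_matches() unwinds the
 * entries it already added before returning the error.
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{ .name = "foo", .revision = 0, .family = NFPROTO_IPV4,
 *		  .match = foo_mt, .matchsize = sizeof(struct xt_foo_v0),
 *		  .me = THIS_MODULE },
 *		{ .name = "foo", .revision = 1, .family = NFPROTO_IPV4,
 *		  .match = foo_mt_v1, .matchsize = sizeof(struct xt_foo_v1),
 *		  .me = THIS_MODULE },
 *	};
 *
 *	return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 */
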
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

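/*
 * Worked example: with nfproto == NFPROTO_IPV4 and name == "conntrack",
 * the request_module() above asks for "ipt_conntrack" (prefix "ip" +
 * "t_" + name), while a NFPROTO_UNSPEC lookup would request
 * "xt_conntrack"; xt_prefix[] supplies the legacy per-family prefixes
 * "x", "ip", "arp", "eb" and "ip6".
 */
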
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
					   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose: it checks that the name is NUL terminated and isn't a
 * 'special' name, like "..".
 *
 * Returns a negative number on error or 0 if the name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (size == 0)
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	/* sanity check: */
	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EACCES;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);

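/*
 * Illustrative sketch (the "info->name" field is hypothetical): a
 * module that creates a /proc entry from a user-supplied string
 * validates it first, rejecting unterminated names as well as ".",
 * ".." and anything containing '/':
 *
 *	ret = xt_check_proc_name(info->name, sizeof(info->name));
 *	if (ret < 0)
 *		return ret;
 */
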
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
				     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
				     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */

static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

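/*
 * Worked example (hypothetical numbers): after
 * xt_compat_add_offset(af, 200, 8) and xt_compat_add_offset(af, 500, 4),
 * the table stores the accumulated deltas {200: 8, 500: 12}. Then
 * xt_compat_calc_jump(af, 500) == 8 and xt_compat_calc_jump(af, 700) == 12:
 * the binary search returns the delta accumulated by all entries that
 * start before the queried offset.
 */
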
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata]  ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size; the sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */

int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);

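/*
 * Illustrative sketch (simplified from the ip_tables-style callers;
 * "e" is an ipt_entry inside a blob that ends at "limit"): the caller
 * first bounds-checks the raw offsets against the blob, then lets this
 * helper enforce the layout pictured above:
 *
 *	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
 *	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
 *	    (unsigned char *)e + e->next_offset > limit)
 *		return -EINVAL;
 *
 *	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
 *				     e->next_offset);
 */
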
/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	unsigned int *off;

	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);

	if (off)
		return off;

	if (size < (SIZE_MAX / sizeof(unsigned int)))
		off = vmalloc(size * sizeof(unsigned int));

	return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets array
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);

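/*
 * Illustrative sketch (variable names follow the ip(6)/arp_tables
 * callers): the array is filled in blob order while each rule is
 * validated, so it is sorted ascending and the binary search above is
 * sound; a jump verdict "v" is legal only if it names a rule head.
 *
 *	offsets = xt_alloc_entry_offsets(newinfo->number);
 *	...
 *	offsets[i++] = (void *)e - entry0;	while walking the blob
 *	...
 *	if (!xt_find_jump_offset(offsets, v, newinfo->number))
 *		goto check_failed;		jump lands mid-rule
 */
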
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
				     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
				     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		user += sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);

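/*
 * Illustrative sketch (modelled on the do_add_counters() callers): the
 * helper consumes the entire setsockopt payload and hands back a
 * vmalloc'd counter array that the caller must vfree():
 *
 *	struct xt_counters_info tmp;
 *	void *paddc;
 *
 *	paddc = xt_copy_counters_from_user(arg, len, &tmp, compat);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	...add tmp.num_counters entries from paddc to the table...
 *	vfree(paddc);
 */
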
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif /* CONFIG_COMPAT */

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref. Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

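/*
 * Illustrative sketch (modelled on the get_info()-style callers): the
 * mutex taken by xt_find_table_lock() is dropped via xt_table_unlock(),
 * the module reference via module_put():
 *
 *	t = xt_find_table_lock(net, AF_INET, name);
 *	if (t) {
 *		...inspect t->private under the mutex...
 *		xt_table_unlock(t);
 *		module_put(t->me);
 *	}
 */
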
#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/* make sure all cpus see new ->private value */
	smp_wmb();

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

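/*
 * Illustrative sketch (modelled on the __do_replace() callers): the old
 * table_info is handed back so its counters can be drained before it is
 * freed; "get_counters" is the per-family helper referenced above.
 *
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto put_module;
 *	get_counters(oldinfo, counters);
 *	...copy counters to userspace...
 *	xt_free_table_info(oldinfo);
 */
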
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);

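/*
 * Illustrative sketch (modelled on the iptable_filter-style users): the
 * ops array is allocated once per table template; the per-netns
 * registration code eventually hands it to nf_register_net_hooks():
 *
 *	filter_ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(filter_ops))
 *		return PTR_ERR(filter_ops);
 */
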
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

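/*
 * Illustrative sketch (modelled on the find_check_entry() callers): one
 * allocation state is shared across a whole ruleset translation so that
 * counters of adjacent rules land in the same percpu block:
 *
 *	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
 *
 *	xt_entry_foreach(iter, entry0, newinfo->size) {
 *		if (!xt_percpu_counter_alloc(&alloc_state, &iter->counters))
 *			return -ENOMEM;
 *		...
 *	}
 */
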
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);