2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): datapath debug output; compiled away unless DEBUG_IP_FIREWALL. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
/* NOTE(review): the #else/#endif lines of these three conditionals appear
 * to be missing from this copy of the file — restore before building. */
#define dprintf(format, args...)

/* duprintf(): user/config-path debug output; off unless DEBUG_IP_FIREWALL_USER. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#define duprintf(format, args...)

/* IP_NF_ASSERT(): WARN_ON-backed sanity check under CONFIG_NETFILTER_DEBUG. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#define IP_NF_ASSERT(x)
65 /* All the better to debug you with... */
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 return xt_alloc_initial_table(ip6t, IP6T);
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
83 Hence the start of any table is given by get_table() below. */
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
/*
 * ip6_packet_match - compare one packet against the IPv6-header part of a
 * rule: masked source/destination address, in/out interface names and
 * (optionally) the upper-layer protocol, honouring the IP6T_INV_* flags.
 * Performance critical - called for every packet.
 *
 * NOTE(review): this copy of the file is truncated — the "static bool"
 * return-type line, the indev/outdev parameters, the opening/closing
 * braces and several return statements are missing and must be restored
 * from the upstream source before this will compile.
 */
ip6_packet_match(const struct sk_buff *skb,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* FWINV: XOR the raw test result with the rule's inversion flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/* NOTE(review): the opening comment marker of this commented-out IPv4-style
 * debug block is on a missing line; only the trailing star-slash survives. */
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/

	/* Incoming interface name match (aligned word-wise compare). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");

	/* Outgoing interface name match. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		unsigned short _frag_off;

		/* Walk the extension-header chain to find the upper-layer
		 * protocol and the fragment offset. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);

		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
168 /* should be ip6 safe */
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
188 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
/* Return the rule entry at byte offset @offset into the table blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_entry *e)
203 static const struct ip6t_ip6 uncond;
205 return e->target_offset == sizeof(struct ip6t_entry) &&
206 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Human-readable hook names used when formatting TRACE log lines.
 * NOTE(review): the closing brace/semicolon lines of the following
 * array, enum and struct initializers are missing from this copy. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",

/* Classification of the traced rule for the log line's last field. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",

/* Logging parameters shared by every TRACE output line. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.level = LOGLEVEL_WARNING,
	.logflags = NF_LOG_MASK,
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk one entry while locating a traced rule: remember the chain name
 * whenever an ERROR target (head of a user chain) is seen, and classify
 * the final entry as policy/return/plain rule for the TRACE log line.
 * NOTE(review): the "static unsigned int" return-type line, the rule
 * counter updates and the return statements are missing from this copy.
 */
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;

	if (unconditional(s) &&
	    strcmp(t->target.u.kernel.target->name,
		   XT_STANDARD_TARGET) == 0 &&
		/* Tail of chains: STANDARD target (return/policy) */
		/* Built-in chain policy if we are still in the entry hook's
		 * own chain, otherwise a user-chain RETURN. */
		*comment = *chainname == hookname
			? comments[NF_IP6_TRACE_COMMENT_POLICY]
			: comments[NF_IP6_TRACE_COMMENT_RETURN];
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a packet that
 * carries skb->nf_trace, by scanning the current hook's chain until the
 * entry being executed is found.
 * NOTE(review): the "unsigned int hook" parameter line and the loop body
 * braces are missing from this copy of the file.
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* Start scanning at the hook's entry point in the blob. */
	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
					  &chainname, &comment, &rulenum) != 0)

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
308 static inline struct ip6t_entry *
309 ip6t_next_entry(const struct ip6t_entry *entry)
311 return (void *)entry + entry->next_offset;
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * ip6t_do_table - the main rule-traversal loop, run for every packet on
 * every hook the table registers.  Walks entries from the hook's entry
 * point, applies matches, bumps per-cpu counters, executes targets and
 * maintains a per-cpu jump stack for user-chain calls/returns.
 * NOTE(review): this copy is truncated — the "unsigned int" return-type
 * line, "unsigned int addend", "stackidx = 0", local_bh_disable/enable,
 * the do { } loop header, several braces and the final return statement
 * are missing and must be restored from upstream.
 */
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. (NOTE(review): closing marker restored; a line of
	 * this comment is missing in this copy.) */
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Per-cpu seqcount "lock": lets readers of the counters detect
	 * in-flight updates without a write lock on the hot path. */
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer. (NOTE(review): comment start/end lines missing.)
	 */
	smp_read_barrier_depends();
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 * (NOTE(review): closing marker restored.) */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		/* Cheap IPv6-header match first; skip rule on mismatch. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
				      &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
			e = ip6t_next_entry(e);

		/* Run every extension match attached to the rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			v = ((struct xt_standard_target *)t)->verdict;

			/* Pop from stack? */
			if (v != XT_RETURN) {
				/* Negative verdicts encode NF_* values. */
				verdict = (unsigned int)(-v) - 1;
				/* Empty stack: fall back to the hook's
				 * underflow (built-in chain policy). */
				e = get_entry(table_base,
					      private->underflow[hook]);
				e = ip6t_next_entry(jumpstack[--stackidx]);
			/* A jump (not GOTO, not fall-through): push the
			 * current entry so RETURN can come back to it. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (unlikely(stackidx >= private->stacksize)) {
				jumpstack[stackidx++] = e;

			e = get_entry(table_base, v);

		/* Non-standard target: run its target hook. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);

#ifdef DEBUG_ALLOW_ALL
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Depth-first walk over every chain reachable from each valid hook.
 * Uses e->counters.pcnt as a temporary back-pointer (restored to 0 on
 * the way out) and e->comefrom both as the result bitmask and as the
 * "currently on the DFS stack" marker (bit NF_INET_NUMHOOKS), which is
 * how loops are detected.
 * NOTE(review): this copy is truncated — the "static int" line, the
 * for(;;) loop header, many braces, "continue"/"return 0/1" statements
 * and several duprintf argument lines are missing.
 */
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Still on the DFS stack => we walked into ourselves. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts below the legal NF_* range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",

				/* Return: backtrack through the last
				 * big jump. (NOTE(review): comment tail
				 * restored; original lines missing.) */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
					/* Pop the saved back-pointer and
					 * clear the temporary. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					e = (struct ip6t_entry *)
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
				e->counters.pcnt = pos;
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
					/* Jump target must leave room for at
					 * least one full entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
					/* Jumps must land exactly on an
					 * entry boundary. */
					if (!xt_find_jump_offset(offsets, newpos,
					e = (struct ip6t_entry *)
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
				e = (struct ip6t_entry *)
				e->counters.pcnt = pos;
		duprintf("Finished chain %u\n", hook);
584 static void cleanup_match(struct xt_entry_match *m, struct net *net)
586 struct xt_mtdtor_param par;
589 par.match = m->u.kernel.match;
590 par.matchinfo = m->data;
591 par.family = NFPROTO_IPV6;
592 if (par.match->destroy != NULL)
593 par.match->destroy(&par);
594 module_put(par.match->me);
597 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
599 const struct ip6t_ip6 *ipv6 = par->entryinfo;
602 par->match = m->u.kernel.match;
603 par->matchinfo = m->data;
605 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
606 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
608 duprintf("ip_tables: check failed for `%s'.\n",
616 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
618 struct xt_match *match;
621 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
624 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
625 return PTR_ERR(match);
627 m->u.kernel.match = match;
629 ret = check_match(m, par);
635 module_put(m->u.kernel.match->me);
639 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
641 struct xt_entry_target *t = ip6t_get_target(e);
642 struct xt_tgchk_param par = {
646 .target = t->u.kernel.target,
648 .hook_mask = e->comefrom,
649 .family = NFPROTO_IPV6,
653 t = ip6t_get_target(e);
654 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
655 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
657 duprintf("ip_tables: check failed for `%s'.\n",
658 t->u.kernel.target->name);
/*
 * Fully resolve and validate one rule: allocate its per-cpu counter pair,
 * look up and check every match, then look up and check the target.
 * On any failure everything acquired so far is released in reverse order.
 * NOTE(review): this copy is truncated — the "static int" line, the
 * "unsigned int size" parameter, "unsigned int j" match counter, error
 * labels and return statements are missing.
 */
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 struct xt_percpu_counter_alloc_state *alloc_state)
	struct xt_entry_target *t;
	struct xt_target *target;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* Reserve the per-cpu byte/packet counter slot for this rule. */
	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))

	memset(&mtpar, 0, sizeof(mtpar));
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
			goto cleanup_matches;

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	/* Error unwind: drop target module ref, then tear down the
	 * matches that were successfully set up, then the counter. */
	module_put(t->u.kernel.target->me);
	xt_ematch_foreach(ematch, e) {
		cleanup_match(ematch, net);
	xt_percpu_counter_free(&e->counters);
721 static bool check_underflow(const struct ip6t_entry *e)
723 const struct xt_entry_target *t;
724 unsigned int verdict;
726 if (!unconditional(e))
728 t = ip6t_get_target_c(e);
729 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
731 verdict = ((struct xt_standard_target *)t)->verdict;
732 verdict = -verdict - 1;
733 return verdict == NF_DROP || verdict == NF_ACCEPT;
/*
 * Structural validation of one rule while walking the user blob: bounds
 * and alignment of the entry, sane target/next offsets, flag sanity, and
 * recording of hook entry points / underflows whose offsets match this
 * entry's position.
 * NOTE(review): truncated — the "static int" line, the e->next_offset
 * check preceding the size comparison, the comefrom clear and the return
 * statements are missing from this copy.
 */
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
	/* Entry must be aligned and lie fully inside the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);

	/* Entry must at least hold the fixed header plus a target. */
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",

	if (!ip6_checkentry(&e->ipv6))

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
			newinfo->underflow[h] = underflows[h];

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
793 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
795 struct xt_tgdtor_param par;
796 struct xt_entry_target *t;
797 struct xt_entry_match *ematch;
799 /* Cleanup all matches */
800 xt_ematch_foreach(ematch, e)
801 cleanup_match(ematch, net);
802 t = ip6t_get_target(e);
805 par.target = t->u.kernel.target;
806 par.targinfo = t->data;
807 par.family = NFPROTO_IPV6;
808 if (par.target->destroy != NULL)
809 par.target->destroy(&par);
810 module_put(par.target->me);
811 xt_percpu_counter_free(&e->counters);
/* Checks and translates the user-supplied table segment (held in
   'newinfo'). (NOTE(review): comment tail restored; original line
   missing from this copy.) */
/*
 * Full validation pipeline for a replacement ruleset: per-entry size and
 * hook checks, entry-count check, all-hooks-assigned check, loop
 * detection via mark_source_chains(), then the expensive per-entry
 * match/target resolution.  Cleans up already-resolved entries on error.
 * NOTE(review): truncated — "static int", "int ret", "unsigned int i",
 * the offsets allocation failure check, kvfree(offsets), error labels
 * and return statements are missing.
 */
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ip6t_entry *iter;
	unsigned int *offsets;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;

	duprintf("translate_table: size %u\n", newinfo->size);
	/* Offset table used later to verify that jumps land on entries. */
	offsets = xt_alloc_entry_offsets(newinfo->number);

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		/* Each user chain head (ERROR target) can add one level
		 * of jump-stack depth. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {

	/* Finally, each sanity check must pass */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,

	/* Error path: undo the entries that passed find_check_entry(). */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		cleanup_entry(iter, net);
913 get_counters(const struct xt_table_info *t,
914 struct xt_counters counters[])
916 struct ip6t_entry *iter;
920 for_each_possible_cpu(cpu) {
921 seqcount_t *s = &per_cpu(xt_recseq, cpu);
924 xt_entry_foreach(iter, t->entries, t->size) {
925 struct xt_counters *tmp;
929 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
931 start = read_seqcount_begin(s);
934 } while (read_seqcount_retry(s, start));
936 ADD_COUNTER(counters[i], bcnt, pcnt);
942 static struct xt_counters *alloc_counters(const struct xt_table *table)
944 unsigned int countersize;
945 struct xt_counters *counters;
946 const struct xt_table_info *private = table->private;
948 /* We need atomic snapshot of counters: rest doesn't change
949 (other than comefrom, which userspace doesn't care
951 countersize = sizeof(struct xt_counters) * private->number;
952 counters = vzalloc(countersize);
954 if (counters == NULL)
955 return ERR_PTR(-ENOMEM);
957 get_counters(private, counters);
/*
 * Copy the whole ruleset blob to userspace, then patch each entry in the
 * user copy: substitute the snapshotted counters and replace kernel
 * match/target pointers with their user-visible names.
 * NOTE(review): truncated — the "static int" line, "int ret", the
 * free-counters/error paths and the return statement are missing from
 * this copy.
 */
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	const void *loc_cpu_entry;

	/* Consistent counter snapshot first (see alloc_counters()). */
	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the raw per-cpu counter slot with the
		 * aggregated snapshot value. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 sizeof(counters[num])) != 0) {

		/* Fix up each match's user-visible name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
				m->u.kernel.match->name,
				strlen(m->u.kernel.match->name)+1)

		/* Fix up the target's user-visible name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
			t->u.kernel.target->name,
			strlen(t->u.kernel.target->name)+1) != 0) {
1032 #ifdef CONFIG_COMPAT
1033 static void compat_standard_from_user(void *dst, const void *src)
1035 int v = *(compat_int_t *)src;
1038 v += xt_compat_calc_jump(AF_INET6, v);
1039 memcpy(dst, &v, sizeof(v));
1042 static int compat_standard_to_user(void __user *dst, const void *src)
1044 compat_int_t cv = *(int *)src;
1047 cv -= xt_compat_calc_jump(AF_INET6, cv);
1048 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/*
 * Compute how much smaller one entry becomes in 32-bit (compat) layout,
 * record the offset delta for later jump fix-ups, and shrink the compat
 * size/hook-entry/underflow bookkeeping accordingly.
 * NOTE(review): truncated — "int off, i, ret;" declarations, braces and
 * the final return are missing from this copy.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;

	/* Fixed-header shrinkage plus each match's and the target's own
	 * compat size difference. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);

	/* Hook entry points and underflows after this entry move down. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
/*
 * Build the xt_table_info a 32-bit caller would see: copy the native
 * header then recompute size and hook offsets via compat_calc_entry()
 * for every rule.
 * NOTE(review): truncated — "int ret;", the -EINVAL return for bad
 * arguments, the loop-error check and the final return are missing.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
	struct ip6t_entry *iter;
	const void *loc_cpu_entry;

	if (!newinfo || !info)

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/*
 * IP6T_SO_GET_INFO handler: look up a table by name (auto-loading its
 * module if needed) and return its hook entry points, underflows, entry
 * count and size.  In compat mode the sizes/offsets are converted to the
 * 32-bit layout first.
 * NOTE(review): truncated — "struct xt_table *t", "int ret",
 * capability check, error labels, xt_table_unlock and returns are
 * missing from this copy.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
	char name[XT_TABLE_MAXNAMELEN];

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));

	if (copy_from_user(name, user, sizeof(name)) != 0)

	/* Ensure NUL termination of the user-supplied table name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
		xt_compat_lock(AF_INET6);
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

			/* Report compat-layout sizes to 32-bit callers. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)

		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
		xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_GET_ENTRIES handler: validate the requested size against the
 * live table and copy the full ruleset (with counters and names fixed
 * up) back to userspace.
 * NOTE(review): truncated — the "static int" line, the "int *len"
 * parameter, "int ret", "struct xt_table *t", module_put/unlock and
 * the final return are missing from this copy.
 */
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	struct ip6t_get_entries get;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
	/* Caller-announced length must match header + payload exactly. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
		ret = t ? PTR_ERR(t) : -ENOENT;
1209 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1210 struct xt_table_info *newinfo, unsigned int num_counters,
1211 void __user *counters_ptr)
1215 struct xt_table_info *oldinfo;
1216 struct xt_counters *counters;
1217 struct ip6t_entry *iter;
1220 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1226 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1227 "ip6table_%s", name);
1228 if (IS_ERR_OR_NULL(t)) {
1229 ret = t ? PTR_ERR(t) : -ENOENT;
1230 goto free_newinfo_counters_untrans;
1234 if (valid_hooks != t->valid_hooks) {
1235 duprintf("Valid hook crap: %08X vs %08X\n",
1236 valid_hooks, t->valid_hooks);
1241 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1245 /* Update module usage count based on number of rules */
1246 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1247 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1248 if ((oldinfo->number > oldinfo->initial_entries) ||
1249 (newinfo->number <= oldinfo->initial_entries))
1251 if ((oldinfo->number > oldinfo->initial_entries) &&
1252 (newinfo->number <= oldinfo->initial_entries))
1255 /* Get the old counters, and synchronize with replace */
1256 get_counters(oldinfo, counters);
1258 /* Decrease module usage counts and free resource */
1259 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1260 cleanup_entry(iter, net);
1262 xt_free_table_info(oldinfo);
1263 if (copy_to_user(counters_ptr, counters,
1264 sizeof(struct xt_counters) * num_counters) != 0) {
1265 /* Silent error, can't fail, new table is already in place */
1266 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1275 free_newinfo_counters_untrans:
/*
 * IP6T_SO_SET_REPLACE handler: copy the replace header and blob from
 * userspace, run the full translate_table() validation, then install it
 * via __do_replace().  On any failure the new blob is torn down.
 * NOTE(review): truncated — the "static int" line, "int ret",
 * allocation-failure checks, the copy-failure error path and the final
 * return are missing from this copy.
 */
do_replace(struct net *net, const void __user *user, unsigned int len)
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
	if (tmp.num_counters == 0)

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
		goto free_newinfo_untrans;

 free_newinfo_untrans:
	/* Undo translate_table()'s per-entry setup before freeing. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
	xt_free_table_info(newinfo);
/*
 * IP6T_SO_SET_ADD_COUNTERS handler: add user-supplied byte/packet deltas
 * to every rule's per-cpu counters under the write-recseq.
 * NOTE(review): truncated — the "static int" line, "unsigned int i",
 * "int ret", local_bh_disable/enable, loop index increment, unlock and
 * free paths are missing from this copy.
 */
do_add_counters(struct net *net, const void __user *user, unsigned int len,
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	const struct xt_table_info *private;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Copies and validates header + counter array in one helper. */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;

	private = t->private;
	/* Counter array must match the live rule count exactly. */
	if (private->number != tmp.num_counters) {
		goto unlock_up_free;

	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
	xt_write_recseq_end(addend);
1381 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: pointers become
 * compat_uptr_t so sizes/offsets differ from the native struct.
 * NOTE(review): several fields (valid_hooks, num_entries, size,
 * num_counters) and the closing brace appear to be missing from this
 * copy of the file. */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
/*
 * Convert one native entry into its 32-bit layout in the user buffer:
 * copy the header and counters, convert each match and the target with
 * the xt compat helpers, then patch the now-shrunken offsets.
 * NOTE(review): truncated — the "static int" line, "unsigned int i"
 * parameter, "int ret", -EFAULT returns, error checks after the
 * conversion calls and the final return are missing from this copy.
 */
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;

	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
	/* Offsets shrink by however much the conversion saved so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
/*
 * Look up (and pin via module refcount) the match named in a compat
 * entry, and add the compat->native size difference for that match to
 * *size.  Returns a negative errno if the match cannot be found.
 */
1434 compat_find_calc_match(struct xt_entry_match *m,
1435 const struct ip6t_ip6 *ipv6,
1438 struct xt_match *match;
1440 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1441 m->u.user.revision);
1442 if (IS_ERR(match)) {
1443 duprintf("compat_check_calc_match: `%s' not found\n",
1445 return PTR_ERR(match);
1447 m->u.kernel.match = match; /* reference dropped later via module_put() */
1448 *size += xt_compat_match_offset(match);
/* Drop the module references taken on a compat entry's matches and
 * target (the inverse of the lookups in the compat check path).
 */
1452 static void compat_release_entry(struct compat_ip6t_entry *e)
1454 struct xt_entry_target *t;
1455 struct xt_entry_match *ematch;
1457 /* Cleanup all matches */
1458 xt_ematch_foreach(ematch, e)
1459 module_put(ematch->u.kernel.match->me);
1460 t = compat_ip6t_get_target(e);
1461 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry from an untrusted userspace blob:
 * alignment, bounds against [base, limit), minimum size, and offset
 * sanity.  Then look up and pin the entry's matches and target,
 * accumulating the compat->native size delta in *off and recording it
 * via xt_compat_add_offset() for later fixups.  The error path
 * releases any references already taken.
 */
1465 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1466 struct xt_table_info *newinfo,
1468 const unsigned char *base,
1469 const unsigned char *limit)
1471 struct xt_entry_match *ematch;
1472 struct xt_entry_target *t;
1473 struct xt_target *target;
1474 unsigned int entry_offset;
1478 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries extending past the blob. */
1479 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1480 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1481 (unsigned char *)e + e->next_offset > limit) {
1482 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its own header plus a target header. */
1486 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1487 sizeof(struct compat_xt_entry_target)) {
1488 duprintf("checking: element %p size %u\n",
1493 if (!ip6_checkentry(&e->ipv6))
/* Generic target/next offset consistency checks vs. the match chain. */
1496 ret = xt_compat_check_entry_offsets(e, e->elems,
1497 e->target_offset, e->next_offset);
/* Base delta: a native entry header is larger than the compat one. */
1501 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1502 entry_offset = (void *)e - (void *)base;
1504 xt_ematch_foreach(ematch, e) {
1505 ret = compat_find_calc_match(ematch, &e->ipv6, &off);
1507 goto release_matches;
1511 t = compat_ip6t_get_target(e);
1512 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1513 t->u.user.revision);
1514 if (IS_ERR(target)) {
1515 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1517 ret = PTR_ERR(target);
1518 goto release_matches;
1520 t->u.kernel.target = target;
1522 off += xt_compat_target_offset(target);
/* Remember this entry's total size delta for the copy pass. */
1524 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
/* Error unwind: drop the target ref, then every pinned match ref. */
1531 module_put(t->u.kernel.target->me);
1533 xt_ematch_foreach(ematch, e) {
1536 module_put(ematch->u.kernel.match->me);
/*
 * Expand one (already validated) compat entry into native layout at
 * *dstptr, converting matches and target in place and adjusting the
 * entry's target/next offsets by the bytes gained.  The hook-entry and
 * underflow offsets recorded in newinfo are shifted for entries that
 * lie after this one.
 */
1542 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1544 struct xt_table_info *newinfo, unsigned char *base)
1546 struct xt_entry_target *t;
1547 struct ip6t_entry *de;
1548 unsigned int origsize;
1550 struct xt_entry_match *ematch;
1553 de = (struct ip6t_entry *)*dstptr;
1554 memcpy(de, e, sizeof(struct ip6t_entry));
1555 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1557 *dstptr += sizeof(struct ip6t_entry);
1558 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1560 xt_ematch_foreach(ematch, e)
1561 xt_compat_match_from_user(ematch, dstptr, size);
/* (origsize - *size) is the net layout change accumulated so far. */
1563 de->target_offset = e->target_offset - (origsize - *size);
1564 t = compat_ip6t_get_target(e);
1565 xt_compat_target_from_user(t, dstptr, size);
1567 de->next_offset = e->next_offset - (origsize - *size);
/* Shift hook/underflow marks that point beyond this entry. */
1568 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1569 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1570 newinfo->hook_entry[h] -= origsize - *size;
1571 if ((unsigned char *)de - base < newinfo->underflow[h])
1572 newinfo->underflow[h] -= origsize - *size;
/*
 * Convert a whole compat replace request into a native xt_table_info.
 * Pass 1 (under xt_compat_lock): validate every entry and record its
 * compat->native size delta.  Pass 2: copy all entries into a freshly
 * allocated native table and fix up hook offsets.  Finally a native
 * ip6t_replace header is synthesized and translate_table() performs
 * the full native-side checking.
 */
1577 translate_compat_table(struct net *net,
1578 struct xt_table_info **pinfo,
1580 const struct compat_ip6t_replace *compatr)
1583 struct xt_table_info *newinfo, *info;
1584 void *pos, *entry0, *entry1;
1585 struct compat_ip6t_entry *iter0;
1586 struct ip6t_replace repl;
1592 size = compatr->size;
1593 info->number = compatr->num_entries;
1595 duprintf("translate_compat_table: size %u\n", info->size);
1597 xt_compat_lock(AF_INET6);
1598 xt_compat_init_offsets(AF_INET6, compatr->num_entries);
1599 /* Walk through entries, checking offsets. */
1600 xt_entry_foreach(iter0, entry0, compatr->size) {
1601 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1603 entry0 + compatr->size);
/* The walk must have visited exactly the advertised entry count. */
1610 if (j != compatr->num_entries) {
1611 duprintf("translate_compat_table: %u not %u entries\n",
1612 j, compatr->num_entries);
1617 newinfo = xt_alloc_table_info(size);
1621 memset(newinfo->entries, 0, size);
1623 newinfo->number = compatr->num_entries;
1624 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1625 newinfo->hook_entry[i] = compatr->hook_entry[i];
1626 newinfo->underflow[i] = compatr->underflow[i];
1628 entry1 = newinfo->entries;
1630 size = compatr->size;
1631 xt_entry_foreach(iter0, entry0, compatr->size)
1632 compat_copy_entry_from_user(iter0, &pos, &size,
1635 /* all module references in entry0 are now gone. */
1636 xt_compat_flush_offsets(AF_INET6);
1637 xt_compat_unlock(AF_INET6);
/* Build a native replace header so translate_table() can re-check. */
1639 memcpy(&repl, compatr, sizeof(*compatr));
1641 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1642 repl.hook_entry[i] = newinfo->hook_entry[i];
1643 repl.underflow[i] = newinfo->underflow[i];
/* Counters are handled by the caller, not by translate_table(). */
1646 repl.num_counters = 0;
1647 repl.counters = NULL;
1648 repl.size = newinfo->size;
1649 ret = translate_table(net, newinfo, entry1, &repl);
1655 xt_free_table_info(info);
1659 xt_free_table_info(newinfo);
/* Pass-1 error unwind: flush offsets, unlock, release per-entry refs. */
1662 xt_compat_flush_offsets(AF_INET6);
1663 xt_compat_unlock(AF_INET6);
1664 xt_entry_foreach(iter0, entry0, compatr->size) {
1667 compat_release_entry(iter0);
/*
 * Compat handler for IP6T_SO_SET_REPLACE: copy the compat header and
 * ruleset from userspace, translate them to native layout, then swap
 * the table in via __do_replace().  Returns 0 or a negative errno.
 */
1673 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1676 struct compat_ip6t_replace tmp;
1677 struct xt_table_info *newinfo;
1678 void *loc_cpu_entry;
1679 struct ip6t_entry *iter;
1681 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1684 /* overflow check */
1685 if (tmp.size >= INT_MAX / num_possible_cpus())
1687 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1689 if (tmp.num_counters == 0)
/* Userspace-supplied name: force NUL termination before use. */
1692 tmp.name[sizeof(tmp.name)-1] = 0;
1694 newinfo = xt_alloc_table_info(tmp.size);
1698 loc_cpu_entry = newinfo->entries;
1699 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1705 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1709 duprintf("compat_do_replace: Translated table\n");
1711 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1712 tmp.num_counters, compat_ptr(tmp.counters));
1714 goto free_newinfo_untrans;
/* Installation failed: undo per-entry setup before freeing. */
1717 free_newinfo_untrans:
1718 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1719 cleanup_entry(iter, net);
1721 xt_free_table_info(newinfo);
/* Compat setsockopt demultiplexer; requires CAP_NET_ADMIN in the
 * socket's user namespace.
 */
1726 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1731 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1735 case IP6T_SO_SET_REPLACE:
1736 ret = compat_do_replace(sock_net(sk), user, len);
1739 case IP6T_SO_SET_ADD_COUNTERS:
1740 ret = do_add_counters(sock_net(sk), user, len, 1); /* 1 == compat */
1744 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Compat layout of the IP6T_SO_GET_ENTRIES request/reply buffer. */
1751 struct compat_ip6t_get_entries {
1752 char name[XT_TABLE_MAXNAMELEN];
1754 struct compat_ip6t_entry entrytable[0]; /* variable-length result */
/*
 * Dump a table's ruleset to 32-bit userspace: snapshot the counters,
 * then convert each entry via compat_copy_entry_to_user().
 */
1758 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1759 void __user *userptr)
1761 struct xt_counters *counters;
1762 const struct xt_table_info *private = table->private;
1767 struct ip6t_entry *iter;
1769 counters = alloc_counters(table);
1770 if (IS_ERR(counters))
1771 return PTR_ERR(counters);
1775 xt_entry_foreach(iter, private->entries, total_size) {
1776 ret = compat_copy_entry_to_user(iter, &pos,
1777 &size, counters, i++);
/*
 * Compat handler for IP6T_SO_GET_ENTRIES: validate the caller's buffer
 * length against the table's compat-converted size, then dump the
 * entries under xt_compat_lock.
 */
1787 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1791 struct compat_ip6t_get_entries get;
1794 if (*len < sizeof(get)) {
1795 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1799 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1802 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1803 duprintf("compat_get_entries: %u != %zu\n",
1804 *len, sizeof(get) + get.size);
/* Userspace-supplied name: force NUL termination before lookup. */
1807 get.name[sizeof(get.name) - 1] = '\0';
1809 xt_compat_lock(AF_INET6);
1810 t = xt_find_table_lock(net, AF_INET6, get.name);
1811 if (!IS_ERR_OR_NULL(t)) {
1812 const struct xt_table_info *private = t->private;
1813 struct xt_table_info info;
1814 duprintf("t->private->number = %u\n", private->number);
/* Dump only if userspace sized its buffer to the exact compat size. */
1815 ret = compat_table_info(private, &info);
1816 if (!ret && get.size == info.size) {
1817 ret = compat_copy_entries_to_user(private->size,
1818 t, uptr->entrytable);
1820 duprintf("compat_get_entries: I've got %u not %u!\n",
1821 private->size, get.size);
1824 xt_compat_flush_offsets(AF_INET6);
1828 ret = t ? PTR_ERR(t) : -ENOENT;
1830 xt_compat_unlock(AF_INET6);
1834 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt demultiplexer; commands whose layout is identical
 * on 32- and 64-bit fall through to the native handler.
 */
1837 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1841 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1845 case IP6T_SO_GET_INFO:
1846 ret = get_info(sock_net(sk), user, len, 1); /* 1 == compat */
1848 case IP6T_SO_GET_ENTRIES:
1849 ret = compat_get_entries(sock_net(sk), user, len);
1852 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt demultiplexer; requires CAP_NET_ADMIN in the
 * socket's user namespace.
 */
1859 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1863 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1867 case IP6T_SO_SET_REPLACE:
1868 ret = do_replace(sock_net(sk), user, len);
1871 case IP6T_SO_SET_ADD_COUNTERS:
1872 ret = do_add_counters(sock_net(sk), user, len, 0); /* 0 == native */
1876 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt demultiplexer: table info, entry dump, and
 * match/target revision queries.
 */
1884 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1888 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1892 case IP6T_SO_GET_INFO:
1893 ret = get_info(sock_net(sk), user, len, 0);
1896 case IP6T_SO_GET_ENTRIES:
1897 ret = get_entries(sock_net(sk), user, len);
1900 case IP6T_SO_GET_REVISION_MATCH:
1901 case IP6T_SO_GET_REVISION_TARGET: {
1902 struct xt_get_revision rev;
1905 if (*len != sizeof(rev)) {
1909 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Userspace-supplied name: force NUL termination. */
1913 rev.name[sizeof(rev.name)-1] = 0;
1915 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* May autoload an ip6t_<name> module if the revision is unknown. */
1920 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1923 "ip6t_%s", rev.name);
1928 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Build and register a new ip6tables table from an initial ruleset.
 * The initial entries are copied, translated/validated, and handed to
 * xt_register_table().  Returns the live table or an ERR_PTR.
 */
1935 struct xt_table *ip6t_register_table(struct net *net,
1936 const struct xt_table *table,
1937 const struct ip6t_replace *repl)
1940 struct xt_table_info *newinfo;
1941 struct xt_table_info bootstrap = {0};
1942 void *loc_cpu_entry;
1943 struct xt_table *new_table;
1945 newinfo = xt_alloc_table_info(repl->size);
1951 loc_cpu_entry = newinfo->entries;
1952 memcpy(loc_cpu_entry, repl->entries, repl->size);
1954 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1958 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1959 if (IS_ERR(new_table)) {
1960 ret = PTR_ERR(new_table);
1966 xt_free_table_info(newinfo);
1968 return ERR_PTR(ret);
/* Unregister a table and release its rules, counters and module refs. */
1971 void ip6t_unregister_table(struct net *net, struct xt_table *table)
1973 struct xt_table_info *private;
1974 void *loc_cpu_entry;
1975 struct module *table_owner = table->me;
1976 struct ip6t_entry *iter;
1978 private = xt_unregister_table(table);
1980 /* Decrease module usage counts and free resources */
1981 loc_cpu_entry = private->entries;
1982 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1983 cleanup_entry(iter, net);
/* Extra ref was held while user-added rules existed; drop it now. */
1984 if (private->number > private->initial_entries)
1985 module_put(table_owner);
1986 xt_free_table_info(private);
1989 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1991 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1992 u_int8_t type, u_int8_t code,
/* NOTE(review): the result is presumably XORed with an invert flag
 * parameter (the caller passes !!(invflags & IP6T_ICMP_INV)) -- confirm
 * against the full parameter list.
 */
1995 return (type == test_type && code >= min_code && code <= max_code)
/* ICMPv6 match: compare the packet's icmp6 type/code against the
 * rule's configured range, honouring the rule's invert flag.
 */
2000 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2002 const struct icmp6hdr *ic;
2003 struct icmp6hdr _icmph;
2004 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2006 /* Must not be a fragment. */
2007 if (par->fragoff != 0)
2010 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2012 /* We've been asked to examine this packet, and we
2013 * can't. Hence, no choice but to drop.
2015 duprintf("Dropping evil ICMP tinygram.\n");
2016 par->hotdrop = true;
2020 return icmp6_type_code_match(icmpinfo->type,
2023 ic->icmp6_type, ic->icmp6_code,
2024 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2027 /* Called when user tries to insert an entry of this type. */
2028 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2030 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2032 /* Must specify no unknown invflags */
/* Only IP6T_ICMP_INV may be set; anything else is -EINVAL. */
2033 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2036 /* The built-in targets: standard (NULL) and error. */
2037 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2039 .name = XT_STANDARD_TARGET,
2040 .targetsize = sizeof(int), /* payload is the standard verdict */
2041 .family = NFPROTO_IPV6,
2042 #ifdef CONFIG_COMPAT
/* Verdict is an int, so it needs 32/64-bit conversion helpers. */
2043 .compatsize = sizeof(compat_int_t),
2044 .compat_from_user = compat_standard_from_user,
2045 .compat_to_user = compat_standard_to_user,
2049 .name = XT_ERROR_TARGET,
2050 .target = ip6t_error,
2051 .targetsize = XT_FUNCTION_MAXNAMELEN,
2052 .family = NFPROTO_IPV6,
/* set/getsockopt registration covering the ip6tables control range,
 * with compat entry points for 32-bit callers.
 */
2056 static struct nf_sockopt_ops ip6t_sockopts = {
2058 .set_optmin = IP6T_BASE_CTL,
2059 .set_optmax = IP6T_SO_SET_MAX+1,
2060 .set = do_ip6t_set_ctl,
2061 #ifdef CONFIG_COMPAT
2062 .compat_set = compat_do_ip6t_set_ctl,
2064 .get_optmin = IP6T_BASE_CTL,
2065 .get_optmax = IP6T_SO_GET_MAX+1,
2066 .get = do_ip6t_get_ctl,
2067 #ifdef CONFIG_COMPAT
2068 .compat_get = compat_do_ip6t_get_ctl,
2070 .owner = THIS_MODULE,
/* Built-in matches: the icmp6 type/code match. */
2073 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2076 .match = icmp6_match,
2077 .matchsize = sizeof(struct ip6t_icmp),
2078 .checkentry = icmp6_checkentry,
2079 .proto = IPPROTO_ICMPV6,
2080 .family = NFPROTO_IPV6,
/* Per-netns init: xt bookkeeping (incl. /proc) for the IPv6 family. */
2085 static int __net_init ip6_tables_net_init(struct net *net)
2087 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown, mirroring ip6_tables_net_init(). */
2090 static void __net_exit ip6_tables_net_exit(struct net *net)
2092 xt_proto_fini(net, NFPROTO_IPV6);
/* Callbacks run on creation/destruction of each network namespace. */
2095 static struct pernet_operations ip6_tables_net_ops = {
2096 .init = ip6_tables_net_init,
2097 .exit = ip6_tables_net_exit,
/* Module init: register pernet ops, built-in targets and matches, then
 * the sockopt interface.  Each failure unwinds the earlier steps.
 */
2100 static int __init ip6_tables_init(void)
2104 ret = register_pernet_subsys(&ip6_tables_net_ops);
2108 /* No one else will be downing sem now, so we won't sleep */
2109 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2112 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2116 /* Register setsockopt */
2117 ret = nf_register_sockopt(&ip6t_sockopts);
2121 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind, in reverse order of registration. */
2125 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2127 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2129 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: tear everything down in reverse registration order. */
2134 static void __exit ip6_tables_fini(void)
2136 nf_unregister_sockopt(&ip6t_sockopts);
2138 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2139 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2140 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Public API consumed by table modules (ip6table_filter etc.). */
2143 EXPORT_SYMBOL(ip6t_register_table);
2144 EXPORT_SYMBOL(ip6t_unregister_table);
2145 EXPORT_SYMBOL(ip6t_do_table);
2147 module_init(ip6_tables_init);
2148 module_exit(ip6_tables_fini);