/*
 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Development of IPv6 NAT funded by Astaro.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/secure_seq.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
22 #include <net/netfilter/nf_conntrack_core.h>
23 #include <net/netfilter/nf_conntrack.h>
24 #include <net/netfilter/nf_nat_core.h>
25 #include <net/netfilter/nf_nat_l3proto.h>
26 #include <net/netfilter/nf_nat_l4proto.h>
28 static const struct nf_nat_l3proto nf_nat_l3proto_ipv6;
31 static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
32 const struct nf_conn *ct,
33 enum ip_conntrack_dir dir,
34 unsigned long statusbit,
37 const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
38 struct flowi6 *fl6 = &fl->u.ip6;
40 if (ct->status & statusbit) {
41 fl6->daddr = t->dst.u3.in6;
42 if (t->dst.protonum == IPPROTO_TCP ||
43 t->dst.protonum == IPPROTO_UDP ||
44 t->dst.protonum == IPPROTO_UDPLITE ||
45 t->dst.protonum == IPPROTO_DCCP ||
46 t->dst.protonum == IPPROTO_SCTP)
47 fl6->fl6_dport = t->dst.u.all;
50 statusbit ^= IPS_NAT_MASK;
52 if (ct->status & statusbit) {
53 fl6->saddr = t->src.u3.in6;
54 if (t->dst.protonum == IPPROTO_TCP ||
55 t->dst.protonum == IPPROTO_UDP ||
56 t->dst.protonum == IPPROTO_UDPLITE ||
57 t->dst.protonum == IPPROTO_DCCP ||
58 t->dst.protonum == IPPROTO_SCTP)
59 fl6->fl6_sport = t->src.u.all;
64 static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
65 const struct nf_nat_range *range)
67 return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
68 ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
71 static u32 nf_nat_ipv6_secure_port(const struct nf_conntrack_tuple *t,
74 return secure_ipv6_port_ephemeral(t->src.u3.ip6, t->dst.u3.ip6, dport);
77 static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
78 unsigned int iphdroff,
79 const struct nf_nat_l4proto *l4proto,
80 const struct nf_conntrack_tuple *target,
81 enum nf_nat_manip_type maniptype)
83 struct ipv6hdr *ipv6h;
88 if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
91 ipv6h = (void *)skb->data + iphdroff;
92 nexthdr = ipv6h->nexthdr;
93 hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
98 if ((frag_off & htons(~0x7)) == 0 &&
99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
103 /* must reload, offset might have changed */
104 ipv6h = (void *)skb->data + iphdroff;
107 if (maniptype == NF_NAT_MANIP_SRC)
108 ipv6h->saddr = target->src.u3.in6;
110 ipv6h->daddr = target->dst.u3.in6;
115 static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
116 unsigned int iphdroff, __sum16 *check,
117 const struct nf_conntrack_tuple *t,
118 enum nf_nat_manip_type maniptype)
120 const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
121 const struct in6_addr *oldip, *newip;
123 if (maniptype == NF_NAT_MANIP_SRC) {
124 oldip = &ipv6h->saddr;
125 newip = &t->src.u3.in6;
127 oldip = &ipv6h->daddr;
128 newip = &t->dst.u3.in6;
130 inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
131 newip->s6_addr32, true);
134 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
135 u8 proto, void *data, __sum16 *check,
136 int datalen, int oldlen)
138 if (skb->ip_summed != CHECKSUM_PARTIAL) {
139 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
141 skb->ip_summed = CHECKSUM_PARTIAL;
142 skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
143 (data - (void *)skb->data);
144 skb->csum_offset = (void *)check - data;
145 *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
148 inet_proto_csum_replace2(check, skb,
149 htons(oldlen), htons(datalen), true);
152 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
153 static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
154 struct nf_nat_range *range)
156 if (tb[CTA_NAT_V6_MINIP]) {
157 nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
158 sizeof(struct in6_addr));
159 range->flags |= NF_NAT_RANGE_MAP_IPS;
162 if (tb[CTA_NAT_V6_MAXIP])
163 nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
164 sizeof(struct in6_addr));
166 range->max_addr = range->min_addr;
172 static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
173 .l3proto = NFPROTO_IPV6,
174 .secure_port = nf_nat_ipv6_secure_port,
175 .in_range = nf_nat_ipv6_in_range,
176 .manip_pkt = nf_nat_ipv6_manip_pkt,
177 .csum_update = nf_nat_ipv6_csum_update,
178 .csum_recalc = nf_nat_ipv6_csum_recalc,
179 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
180 .nlattr_to_range = nf_nat_ipv6_nlattr_to_range,
183 .decode_session = nf_nat_ipv6_decode_session,
187 int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
189 enum ip_conntrack_info ctinfo,
190 unsigned int hooknum,
194 struct icmp6hdr icmp6;
197 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
198 enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
199 const struct nf_nat_l4proto *l4proto;
200 struct nf_conntrack_tuple target;
201 unsigned long statusbit;
203 NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
205 if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
207 if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
210 inside = (void *)skb->data + hdrlen;
211 if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
212 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
214 if (ct->status & IPS_NAT_MASK)
218 if (manip == NF_NAT_MANIP_SRC)
219 statusbit = IPS_SRC_NAT;
221 statusbit = IPS_DST_NAT;
223 /* Invert if this is reply direction */
224 if (dir == IP_CT_DIR_REPLY)
225 statusbit ^= IPS_NAT_MASK;
227 if (!(ct->status & statusbit))
230 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, inside->ip6.nexthdr);
231 if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
232 l4proto, &ct->tuplehash[!dir].tuple, !manip))
235 if (skb->ip_summed != CHECKSUM_PARTIAL) {
236 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
237 inside = (void *)skb->data + hdrlen;
238 inside->icmp6.icmp6_cksum = 0;
239 inside->icmp6.icmp6_cksum =
240 csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
241 skb->len - hdrlen, IPPROTO_ICMPV6,
242 csum_partial(&inside->icmp6,
243 skb->len - hdrlen, 0));
246 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
247 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, IPPROTO_ICMPV6);
248 if (!nf_nat_ipv6_manip_pkt(skb, 0, l4proto, &target, manip))
253 EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
256 nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
257 const struct nf_hook_state *state,
258 unsigned int (*do_chain)(void *priv,
260 const struct nf_hook_state *state,
264 enum ip_conntrack_info ctinfo;
265 struct nf_conn_nat *nat;
266 enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
271 ct = nf_ct_get(skb, &ctinfo);
272 /* Can't track? It's not due to stress, or conntrack would
273 * have dropped it. Hence it's the user's responsibilty to
274 * packet filter it out, or implement conntrack/NAT for that
280 /* Don't try to NAT if this packet is not conntracked */
281 if (nf_ct_is_untracked(ct))
284 nat = nf_ct_nat_ext_add(ct);
290 case IP_CT_RELATED_REPLY:
291 nexthdr = ipv6_hdr(skb)->nexthdr;
292 hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
293 &nexthdr, &frag_off);
295 if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
296 if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
303 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
305 /* Seen it before? This can happen for loopback, retrans,
308 if (!nf_nat_initialized(ct, maniptype)) {
311 ret = do_chain(priv, skb, state, ct);
312 if (ret != NF_ACCEPT)
315 if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
318 ret = nf_nat_alloc_null_binding(ct, state->hook);
319 if (ret != NF_ACCEPT)
322 pr_debug("Already setup manip %s for ct %p\n",
323 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
325 if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
332 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
333 ctinfo == IP_CT_ESTABLISHED_REPLY);
334 if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
338 return nf_nat_packet(ct, ctinfo, state->hook, skb);
341 nf_ct_kill_acct(ct, ctinfo, skb);
344 EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
347 nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
348 const struct nf_hook_state *state,
349 unsigned int (*do_chain)(void *priv,
351 const struct nf_hook_state *state,
355 struct in6_addr daddr = ipv6_hdr(skb)->daddr;
357 ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
358 if (ret != NF_DROP && ret != NF_STOLEN &&
359 ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
364 EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
367 nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
368 const struct nf_hook_state *state,
369 unsigned int (*do_chain)(void *priv,
371 const struct nf_hook_state *state,
375 const struct nf_conn *ct;
376 enum ip_conntrack_info ctinfo;
381 /* root is playing with raw sockets. */
382 if (skb->len < sizeof(struct ipv6hdr))
385 ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
387 if (ret != NF_DROP && ret != NF_STOLEN &&
388 !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
389 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
390 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
392 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
393 &ct->tuplehash[!dir].tuple.dst.u3) ||
394 (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
395 ct->tuplehash[dir].tuple.src.u.all !=
396 ct->tuplehash[!dir].tuple.dst.u.all)) {
397 err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
399 ret = NF_DROP_ERR(err);
405 EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
408 nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
409 const struct nf_hook_state *state,
410 unsigned int (*do_chain)(void *priv,
412 const struct nf_hook_state *state,
415 const struct nf_conn *ct;
416 enum ip_conntrack_info ctinfo;
420 /* root is playing with raw sockets. */
421 if (skb->len < sizeof(struct ipv6hdr))
424 ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
425 if (ret != NF_DROP && ret != NF_STOLEN &&
426 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
427 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
429 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
430 &ct->tuplehash[!dir].tuple.src.u3)) {
431 err = ip6_route_me_harder(state->net, skb);
433 ret = NF_DROP_ERR(err);
436 else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
437 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
438 ct->tuplehash[dir].tuple.dst.u.all !=
439 ct->tuplehash[!dir].tuple.src.u.all) {
440 err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
442 ret = NF_DROP_ERR(err);
448 EXPORT_SYMBOL_GPL(nf_nat_ipv6_local_fn);
450 static int __init nf_nat_l3proto_ipv6_init(void)
454 err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
457 err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
463 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
468 static void __exit nf_nat_l3proto_ipv6_exit(void)
470 nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
471 nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
474 MODULE_LICENSE("GPL");
475 MODULE_ALIAS("nf-nat-" __stringify(AF_INET6));
477 module_init(nf_nat_l3proto_ipv6_init);
478 module_exit(nf_nat_l3proto_ipv6_exit);