#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
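
/* Tear down the flow when a TCP FIN or RST is seen, so the closing
 * connection falls back to the classic path and conntrack can track the
 * shutdown handshake.
 */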
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}
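
/* Rewriting an IPv4 address changes the pseudo-header, so the TCP checksum
 * has to be fixed up incrementally as well.
 */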
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}
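
/* A zero UDP checksum over IPv4 means "no checksum": only fix it up when
 * one is present, and map a recomputed zero back to CSUM_MANGLED_0.
 */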
static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}
static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return -1; /* not NF_DROP: that is 0, callers test < 0 */
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	}

	return 0;
}
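
/* SNAT: in the original direction, replace the source address with the
 * reply tuple's destination; in the reply direction, restore the destination
 * from the original tuple's source. Fix up the IPv4 header checksum, then
 * the transport checksum.
 */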
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}
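
/* Apply the port and address rewrites recorded in the flow tuples; each
 * helper updates the affected checksums incrementally.
 */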
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}
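
/* Extract the lookup tuple from the packet. Only plain TCP or UDP over
 * IPv4, without options or fragmentation and with TTL > 1, is a candidate
 * for the fast path.
 */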
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (iph->ttl <= 1)
		return -1;

	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	/* pskb_may_pull() may have reallocated the header. */
	iph = ip_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}
/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}
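
/* Fast path for established flows: look the packet's tuple up in the
 * flowtable and, on a hit, apply NAT, decrement the TTL, refresh the flow
 * timeout and transmit directly via the cached route and neighbour entry,
 * bypassing the remaining netfilter and IP forwarding path. Packets that
 * do not match fall through to the standard path with NF_ACCEPT.
 */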
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
	if (!outdev)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
	    nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
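
/* The IPv6 helpers below mirror their IPv4 counterparts: addresses are
 * 128 bits, the transport checksum fix-up uses inet_proto_csum_replace16()
 * and there is no IP header checksum to update.
 */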
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}
static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}
static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return -1; /* not NF_DROP: that is 0, callers test < 0 */
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	}

	return 0;
}
static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}
static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;

	return 0;
}
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	if (ip6h->hop_limit <= 1)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	/* pskb_may_pull() may have reallocated the header. */
	ip6h = ipv6_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;

	return 0;
}
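
/* IPv6 counterpart of nf_flow_offload_ip_hook(): on a flowtable hit, apply
 * NAT, decrement the hop limit and transmit via the cached rt6_info and
 * neighbour entry.
 */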
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct in6_addr *nexthop;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
	if (!outdev)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);