/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
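
/*
 * The TCA_CSUM_PARMS attribute carries a struct tc_csum; its update_flags
 * bitmask (TCA_CSUM_UPDATE_FLAG_IPV4HDR/_ICMP/_IGMP/_TCP/_UDP/_UDPLITE/_SCTP)
 * selects which checksums the action recomputes on each packet.
 */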
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
                         int bind)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tcf_csum *p;
        struct tc_csum *parm;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;
        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
        if (err < 0)
                return err;
        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        if (!tcf_idr_check(tn, parm->index, a, bind)) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_csum_ops, bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)       /* don't override defaults */
                        return 0;
                tcf_idr_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(*a);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            skb_try_make_writable(skb, hl + ntkoff))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);
        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use iph->tot_len, or just ipl.
         */
        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {
                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;
                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDPLITE checksum algorithms. Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
         */
        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);
                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);
                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;
                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);
        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
                         unsigned int ipl)
{
        struct sctphdr *sctph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
                return 1;

        sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
        if (!sctph)
                return 0;

        sctph->checksum = sctp_compute_cksum(skb,
                                             skb_network_offset(skb) + ihl);
        skb->ip_summed = CHECKSUM_NONE;
        skb->csum_not_inet = 0;

        return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        case IPPROTO_SCTP:
                if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
                        goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}
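
/*
 * Scan the hop-by-hop extension header options: if a jumbo payload option is
 * present, report the jumbogram length through *pl so the caller sums over
 * the real packet length. Returns 0 on a malformed jumbo option, 1 otherwise.
 */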
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
                                 unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                case IPPROTO_SCTP:
                        if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                            !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
                                goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}
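
/*
 * tcf_csum() is the per-packet action handler: it snapshots the configured
 * action and update_flags under the action lock, then dispatches on the skb
 * protocol to the IPv4 or IPv6 updater, dropping the packet if an update
 * fails.
 */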
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
{
        struct tcf_csum *p = to_tcf_csum(a);
        u32 update_flags;
        int action;

        spin_lock(&p->tcf_lock);
        tcf_lastuse_update(&p->tcf_tm);
        bstats_update(&p->tcf_bstats, skb);
        action = p->tcf_action;
        update_flags = p->update_flags;
        spin_unlock(&p->tcf_lock);

        if (unlikely(action == TC_ACT_SHOT))
                goto drop;

        switch (tc_skb_protocol(skb)) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

        return action;

drop:
        spin_lock(&p->tcf_lock);
        p->tcf_qstats.drops++;
        spin_unlock(&p->tcf_lock);
        return TC_ACT_SHOT;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                         int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = to_tcf_csum(a);
        struct tc_csum opt = {
                .update_flags = p->update_flags,
                .index = p->tcf_index,
                .action = p->tcf_action,
                .refcnt = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .init           = tcf_csum_init,
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .size           = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
        .init = csum_init_net,
        .exit = csum_exit_net,
        .id   = &csum_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
        return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
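
/*
 * Usage sketch (illustrative only, not part of the kernel sources): with
 * iproute2, the action is typically attached to a filter so that the selected
 * checksums are recomputed after earlier actions have rewritten headers,
 * for example after pedit:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall \
 *       action pedit ex munge ip dst set 192.0.2.1 pipe \
 *       action csum ip4h and udp
 *
 * The device name, address, and exact flag spellings above are examples;
 * consult the tc-csum(8) man page of the installed iproute2 for the
 * authoritative syntax.
 */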