// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*-
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>
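/* act_ct glues the TC datapath to netfilter conntrack and to the netfilter
 * flow table infrastructure for a software/hardware fast path. A typical
 * user-space invocation looks roughly like the following (illustrative
 * sketch only; exact syntax depends on the iproute2 version):
 *
 *	tc filter add dev eth0 ingress protocol ip flower ct_state -trk \
 *		action ct zone 1 pipe action goto chain 1
 */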
static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};
static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
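/* All ct actions that share a zone id also share one tcf_ct_flow_table (and
 * thus one nf_flowtable): zones_ht is keyed by the 16-bit zone alone, and
 * lookups/insertions are serialized by zones_mutex.
 */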
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}
static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}
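/* Callers pass the set of bits they want rewritten (e.g. 0xFFFF for a full
 * 16-bit port); the entry stores the inverted value because the
 * FLOW_ACTION_MANGLE convention keeps the bits to preserve in ->mask.
 */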
/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}
static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}
static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}
static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}
static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      enum ip_conntrack_info ctinfo,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
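/* Only IPv4/IPv6 with TCP or UDP can be expressed as mangle actions above;
 * anything else returns -EOPNOTSUPP, so the NATed flow simply is not
 * offloaded and stays on the conntrack slow path.
 */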
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
		if (ctinfo == IP_CT_ESTABLISHED)
			set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		ctinfo = IP_CT_ESTABLISHED_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}
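/* Heuristic used as the flowtable gc callback below: a conntrack entry that
 * has seen replies and is offloaded in hardware, but whose flowtable entry
 * never reached the established state (and has no update pending), was
 * offloaded too early; reporting it as outdated tears it down so it can be
 * re-offloaded later with up-to-date state.
 */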
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}
static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_get_ref(ct_ft);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_put(ct_ft);
}

static struct nf_flowtable_type flowtable_ct = {
	.gc = tcf_ct_flow_is_outdated,
	.action = tcf_ct_flow_table_fill_actions,
	.get = tcf_ct_nf_get,
	.put = tcf_ct_nf_put,
	.owner = THIS_MODULE,
};
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}
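/* Lifetime: each action instance holds one reference on its zone's table
 * (taken in tcf_ct_flow_table_get() above, dropped from
 * tcf_ct_params_free()); the table in turn pins the module until the
 * deferred cleanup work has run.
 */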
static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
	refcount_inc(&ct_ft->ref);
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	WARN_ON(!list_empty(&block->cb_list));
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}
static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
{
	struct nf_conn_act_ct_ext *act_ct_ext;

	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}
}
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp, bool bidirectional)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}
	if (bidirectional)
		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false, bidirectional = true;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;

		tcp = true;
		break;
	case IPPROTO_UDP:
		if (!nf_ct_is_confirmed(ct))
			return;
		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
			bidirectional = false;
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->status & IPS_NAT_MASK)
			return;

		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;

		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}
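/* Offload eligibility, as implemented above: TCP needs an assured,
 * fully-established connection; UDP only needs confirmation (and is
 * offloaded unidirectionally until assured); GRE additionally excludes NAT
 * and keyed (v1) tunnels. Connections using a helper or TCP sequence
 * adjustment always stay in the slow path.
 */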
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	size_t hdrsize;
	u8 ipproto;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (ipproto) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	iph = ip_hdr(skb);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;

	return true;
}
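/* Both tuple fillers (above and the IPv6 variant below) re-read the network
 * header after the second pskb_network_may_pull(): pulling may reallocate
 * the header, so any pointer taken before it must be refreshed.
 */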
static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	bool force_refresh = false;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
		/* Only offload reply direction after connection became
		 * assured.
		 */
		if (test_bit(IPS_ASSURED_BIT, &ct->status))
			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
			/* If flow_table flow has already been updated to the
			 * established state, then don't refresh.
			 */
			return false;
		force_refresh = true;
	}

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
	else
		ctinfo = IP_CT_ESTABLISHED_REPLY;

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);
	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
		/* Process this flow in SW to allow promoting to ASSURED */
		return false;
	}

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}
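/* A true return from tcf_ct_flow_table_lookup() above attaches the cached
 * conntrack entry to the skb, letting tcf_ct_act() skip nf_conntrack_in()
 * entirely; a false return (e.g. for not-yet-assured flows, or on TCP
 * FIN/RST) keeps the packet on the slow path so conntrack state can keep
 * progressing.
 */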
static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
};
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   struct tcf_ct_params *p)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != p->zone)
		goto drop_ct;
	if (p->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != p->helper)
			goto drop_ct;
	}

	/* Force conntrack entry direction. */
	if ((p->ct_action & TCA_CT_ACT_FORCE) &&
	    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}
static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}
static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}
static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u8 proto;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
	if (err)
		return err;

	*defrag = true;
	tc_skb_cb(skb)->mru = mru;

	return 0;
}
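/* nf_ct_handle_fragments() returns -EINPROGRESS when the skb was queued as a
 * fragment (and therefore consumed); tcf_ct_act() maps that to
 * TC_ACT_CONSUMED without counting a drop. On successful reassembly *defrag
 * is set so the caller can fix up qdisc_skb_cb()->pkt_len afterwards.
 */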
static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (params->ct_action & TCA_CT_ACT_NAT)
			nf_nat_helper_put(params->helper);
#endif
		nf_conntrack_helper_put(params->helper);
	}
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl) {
		if (params->put_labels)
			nf_connlabels_put(nf_ct_net(params->tmpl));

		nf_ct_put(params->tmpl);
	}

	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}
static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err, action = 0;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;
	if (ct_action & TCA_CT_ACT_NAT_SRC)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (ct_action & TCA_CT_ACT_NAT_DST)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);

	if (action & BIT(NF_NAT_MANIP_SRC))
		tc_skb_cb(skb)->post_ct_snat = 1;
	if (action & BIT(NF_NAT_MANIP_DST))
		tc_skb_cb(skb)->post_ct_dnat = 1;

	return err;
#else
	return NF_ACCEPT;
#endif
}
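/* Main ct action datapath, roughly: handle 'clear' early; pull to L3 and
 * defragment; try the cached entry or the flowtable fast path; otherwise run
 * nf_conntrack_in(); then apply NAT, helper, mark/labels, confirm on
 * 'commit', and finally consider the connection for flowtable offload.
 */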
TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
				 struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	bool cached, commit, clear;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool add_helper = false;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err)
		goto out_frag;

	err = nf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
		err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
		if (err)
			goto drop;
		add_helper = true;
		if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
			if (!nfct_seqadj_ext_add(ct))
				goto drop;
		}
	}

	if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
		if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
			goto drop;
	}

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(skb, ct, ctinfo);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

out_frag:
	if (err != -EINPROGRESS)
		tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_CONSUMED;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
	[TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
	[TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
	[TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}
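/* For example (illustrative tc syntax, subject to the iproute2 version),
 * "action ct commit nat src addr 10.0.0.1 port 8000-9000" would set
 * TCA_CT_ACT_NAT | TCA_CT_ACT_NAT_SRC, NF_NAT_RANGE_MAP_IPS with min == max
 * address, and NF_NAT_RANGE_PROTO_SPECIFIED with the given port bounds.
 */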
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct nf_conntrack_zone zone;
	int err, family, proto, len;
	bool put_labels = false;
	struct nf_conn *tmpl;
	char *name;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (nf_connlabels_get(net, n_bits - 1)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		} else {
			put_labels = true;
		}

		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		err = -ENOMEM;
		goto err;
	}

	if (tb[TCA_CT_HELPER_NAME]) {
		name = nla_data(tb[TCA_CT_HELPER_NAME]);
		len = nla_len(tb[TCA_CT_HELPER_NAME]);
		if (len > 16 || name[len - 1] != '\0') {
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
			err = -EINVAL;
			goto err;
		}
		family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
		proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
		err = nf_ct_add_helper(tmpl, name, family, proto,
				       p->ct_action & TCA_CT_ACT_NAT, &p->helper);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
			goto err;
		}
	}

	p->put_labels = put_labels;
	p->tmpl = tmpl;
	if (p->ct_action & TCA_CT_ACT_COMMIT)
		__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);

	return 0;

err:
	if (put_labels)
		nf_connlabels_put(net);

	nf_ct_put(tmpl);
	return err;
}
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}
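/* Parameter updates in tcf_ct_init() above are RCU-managed: the new
 * tcf_ct_params is published with rcu_replace_pointer() under tcf_lock, and
 * the previous one is released via call_rcu(), so datapath readers in
 * tcf_ct_act() (which run under rcu_read_lock_bh()) never see a freed
 * params struct.
 */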
static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}
static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}
static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}
static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
{
	if (!helper)
		return 0;

	if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
	    nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
	    nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
		return -1;

	return 0;
}
static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

	if (tcf_ct_dump_helper(skb, p->helper))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (tcf_ct_helper(act))
			return -EOPNOTSUPP;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}
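/* Hardware offload of the ct action (above) is refused when a conntrack
 * helper is configured, since helpers must inspect payload in software; on
 * bind the driver-facing entry carries the action flags, zone and the
 * per-zone flowtable pointer.
 */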
static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.stats_update = tcf_stats_update,
	.offload_act_setup = tcf_ct_offload_act_setup,
	.size = sizeof(struct tcf_ct),
};
static __net_init int ct_init_net(struct net *net)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};
static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}
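/* Init order above matters: the workqueue must exist before any zone table
 * can be put (its cleanup work is queued on it), and the zones rhashtable
 * must exist before the action can be registered and instantiated; module
 * exit below unwinds in the reverse order.
 */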
static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}
module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");