// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
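
/* Example usage from iproute2 (illustrative only; the exact syntax belongs
 * to tc(8)/flower, not to this file): recirculate packets through conntrack
 * in chain 0, then match on the resulting ct_state in chain 1:
 *
 *   tc filter add dev eth0 ingress chain 0 proto ip flower \
 *       ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress chain 1 proto ip flower \
 *       ct_state +trk+new action ct commit zone 1 pipe action pass
 */
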
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

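/* zones_ht maps a conntrack zone id to the tcf_ct_flow_table shared by all
 * ct actions in that zone; zones_mutex serializes creation and removal of
 * the per-zone tables on the control path.
 */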
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helpers check whether the inverted reverse tuple
 * (target) differs from the current direction's tuple, meaning NAT of ports
 * and/or IP addresses is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

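/* Fill a FLOW_ACTION_CT_METADATA entry with the connection's mark, labels,
 * cookie and original direction so that drivers can restore conntrack state
 * for packets that miss in hardware.
 */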
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      enum ip_conntrack_info ctinfo,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	/* aligns with the CT reference stored on the skb by nf_ct_set() */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

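/* .action callback of the flowtable type below: translate one offloaded flow
 * (in the given tuple direction) into flow_action entries, NAT mangles first,
 * followed by the conntrack metadata entry.
 */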
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		ctinfo = IP_CT_ESTABLISHED;
		set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		ctinfo = IP_CT_ESTABLISHED_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

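/* gc heuristic: treat a flow as outdated when conntrack has seen the reply
 * and the entry is already offloaded, but the flowtable flow was never
 * updated to the established state and no hardware update is pending.
 */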
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_get_ref(ct_ft);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_put(ct_ft);
}

static struct nf_flowtable_type flowtable_ct = {
	.gc		= tcf_ct_flow_is_outdated,
	.action		= tcf_ct_flow_table_fill_actions,
	.get		= tcf_ct_nf_get,
	.put		= tcf_ct_nf_put,
	.owner		= THIS_MODULE,
};

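/* Get (or create) the flow table for params->zone. Tables are shared by all
 * ct actions in the same zone and refcounted; the first user initializes the
 * nf_flowtable with hardware offload and counter support.
 */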
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
	refcount_inc(&ct_ft->ref);
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	WARN_ON(!list_empty(&block->cb_list));
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
{
	struct nf_conn_act_ct_ext *act_ct_ext;

	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp, bool bidirectional)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}
	if (bidirectional)
		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

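/* Decide whether a connection is ready for flow-table offload. TCP must be
 * established and assured; unconfirmed UDP is skipped and unassured UDP is
 * offloaded unidirectionally; GRE requires an assured connection without NAT
 * and without GREv1 keys. Helpers and sequence adjustment preclude offload.
 */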
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false, bidirectional = true;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;

		tcp = true;
		break;
	case IPPROTO_UDP:
		if (!nf_ct_is_confirmed(ct))
			return;
		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
			bidirectional = false;
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->status & IPS_NAT_MASK)
			return;

		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;

		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}

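/* Build a flow_offload_tuple from a linear IPv4 packet for the software
 * fast-path lookup. Returns false when the packet cannot be handled here
 * (fragments, IP options, unsupported protocol, expiring TTL); for TCP,
 * *tcph is set so the caller can react to FIN/RST.
 */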
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	size_t hdrsize;
	u8 ipproto;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (ipproto) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	iph = ip_hdr(skb);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}

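/* Software fast path: look the packet up directly in the zone's flow table
 * and, on a hit, attach the conntrack entry to the skb without calling
 * nf_conntrack_in(). Returns true only for assured connections; everything
 * else still traverses conntrack so the entry can be promoted.
 */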
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	bool force_refresh = false;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
		/* Only offload reply direction after connection became
		 * assured.
		 */
		if (test_bit(IPS_ASSURED_BIT, &ct->status))
			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
			/* If flow_table flow has already been updated to the
			 * established state, then don't refresh.
			 */
			return false;
		force_refresh = true;
	}

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
	else
		ctinfo = IP_CT_ESTABLISHED_REPLY;

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);
	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
		/* Process this flow in SW to allow promoting to ASSURED */
		return false;
	}

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != zone_id)
		goto drop_ct;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	return pskb_trim_rcsum(skb, len);
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

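/* Defragment IPv4/IPv6 packets before conntrack sees them. Returns
 * -EINPROGRESS when a fragment was queued (the skb is consumed), 0 when the
 * skb is ready for conntrack, and a negative error otherwise. The observed
 * maximum receive unit is stored in tc_skb_cb(skb)->mru.
 */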
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));

		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;

	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl)
		nf_ct_put(params->tmpl);
	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
	if (err == NF_ACCEPT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			tc_skb_cb(skb)->post_ct_snat = 1;
		if (maniptype == NF_NAT_MANIP_DST)
			tc_skb_cb(skb)->post_ct_dnat = 1;
	}
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;	/* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

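/* Datapath entry point of act_ct. In short: clear conntrack state when
 * TCA_CT_ACT_CLEAR is set; otherwise defragment and trim the packet, try the
 * flow-table fast path (or nf_conntrack_in() on a miss), apply NAT, and on
 * commit set mark/labels, confirm the connection and consider it for
 * flow-table offload.
 */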
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err)
		goto out_frag;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(skb, ct, ctinfo);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

out_frag:
	if (err != -EINPROGRESS)
		tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_CONSUMED;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.stats_update	=	tcf_stats_update,
	.offload_act_setup =	tcf_ct_offload_act_setup,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");