// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include <net/netfilter/nf_conntrack_act_ct.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"

struct ovs_ct_len_tbl {
        int maxlen;
        int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
        u32 value;
        u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
        struct ovs_key_ct_labels value;
        struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
        OVS_CT_NAT = 1 << 0,     /* NAT for committed connections only. */
        OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
        OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};
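
/* A ct(nat(src)) action, for example, leaves info->nat ==
 * (OVS_CT_NAT | OVS_CT_SRC_NAT): parse_nat() below always sets OVS_CT_NAT
 * together with the direction bit, while a bare ct(nat) sets only
 * OVS_CT_NAT so that established connections keep being translated
 * without setting up a new mapping.
 */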

/* Conntrack action context for execution. */
struct ovs_conntrack_info {
        struct nf_conntrack_helper *helper;
        struct nf_conntrack_zone zone;
        struct nf_conn *ct;
        u8 commit : 1;
        u8 nat : 3;                 /* enum ovs_ct_nat */
        u8 force : 1;
        u8 have_eventmask : 1;
        u16 family;
        u32 eventmask;              /* Mask of 1 << IPCT_*. */
        struct md_mark mark;
        struct md_labels labels;
        char timeout[CTNL_TIMEOUT_NAME_MAX];
        struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
        struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
#endif
};

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED  0
#define OVS_CT_LIMIT_DEFAULT    OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS   512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
        /* Elements in ovs_ct_limit_info->limits hash table */
        struct hlist_node hlist_node;
        struct rcu_head rcu;
        u16 zone;
        u32 limit;
};

struct ovs_ct_limit_info {
        u32 default_limit;
        struct hlist_head *limits;
        struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
        [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif

static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

static u16 key_to_nfproto(const struct sw_flow_key *key)
{
        switch (ntohs(key->eth.type)) {
        case ETH_P_IP:
                return NFPROTO_IPV4;
        case ETH_P_IPV6:
                return NFPROTO_IPV6;
        default:
                return NFPROTO_UNSPEC;
        }
}

/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
        u8 ct_state = OVS_CS_F_TRACKED;

        switch (ctinfo) {
        case IP_CT_ESTABLISHED_REPLY:
        case IP_CT_RELATED_REPLY:
                ct_state |= OVS_CS_F_REPLY_DIR;
                break;
        default:
                break;
        }

        switch (ctinfo) {
        case IP_CT_ESTABLISHED:
        case IP_CT_ESTABLISHED_REPLY:
                ct_state |= OVS_CS_F_ESTABLISHED;
                break;
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                ct_state |= OVS_CS_F_RELATED;
                break;
        case IP_CT_NEW:
                ct_state |= OVS_CS_F_NEW;
                break;
        default:
                break;
        }

        return ct_state;
}
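
/* For example, an ICMP error that conntrack attributes to an existing
 * connection in the reply direction arrives with ctinfo ==
 * IP_CT_RELATED_REPLY and maps to
 * (OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR | OVS_CS_F_RELATED),
 * i.e. the "+trk+rel" flow state with the reply direction bit set.
 */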

static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
        return ct ? ct->mark : 0;
#else
        return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif

static void ovs_ct_get_labels(const struct nf_conn *ct,
                              struct ovs_key_ct_labels *labels)
{
        struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

        if (cl)
                memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
        else
                memset(labels, 0, OVS_CT_LABELS_LEN);
}

static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
                                        const struct nf_conntrack_tuple *orig,
                                        u8 icmp_proto)
{
        key->ct_orig_proto = orig->dst.protonum;
        if (orig->dst.protonum == icmp_proto) {
                key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
                key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
        } else {
                key->ct.orig_tp.src = orig->src.u.all;
                key->ct.orig_tp.dst = orig->dst.u.all;
        }
}

static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
                                const struct nf_conntrack_zone *zone,
                                const struct nf_conn *ct)
{
        key->ct_state = state;
        key->ct_zone = zone->id;
        key->ct.mark = ovs_ct_get_mark(ct);
        ovs_ct_get_labels(ct, &key->ct.labels);

        if (ct) {
                const struct nf_conntrack_tuple *orig;

                /* Use the master if we have one. */
                if (ct->master)
                        ct = ct->master;
                orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

                /* IP version must match with the master connection. */
                if (key->eth.type == htons(ETH_P_IP) &&
                    nf_ct_l3num(ct) == NFPROTO_IPV4) {
                        key->ipv4.ct_orig.src = orig->src.u3.ip;
                        key->ipv4.ct_orig.dst = orig->dst.u3.ip;
                        __ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
                        return;
                } else if (key->eth.type == htons(ETH_P_IPV6) &&
                           !sw_flow_key_is_nd(key) &&
                           nf_ct_l3num(ct) == NFPROTO_IPV6) {
                        key->ipv6.ct_orig.src = orig->src.u3.in6;
                        key->ipv6.ct_orig.dst = orig->dst.u3.in6;
                        __ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
                        return;
                }
        }
        /* Clear 'ct_orig_proto' to mark the non-existence of conntrack
         * original direction key fields.
         */
        key->ct_orig_proto = 0;
}

/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action.  If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they
 * are initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
                              const struct ovs_conntrack_info *info,
                              struct sw_flow_key *key, bool post_ct,
                              bool keep_nat_flags)
{
        const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        u8 state = 0;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                state = ovs_ct_get_state(ctinfo);
                /* All unconfirmed entries are NEW connections. */
                if (!nf_ct_is_confirmed(ct))
                        state |= OVS_CS_F_NEW;
                /* OVS persists the related flag for the duration of the
                 * connection.
                 */
                if (ct->master)
                        state |= OVS_CS_F_RELATED;
                if (keep_nat_flags) {
                        state |= key->ct_state & OVS_CS_F_NAT_MASK;
                } else {
                        if (ct->status & IPS_SRC_NAT)
                                state |= OVS_CS_F_SRC_NAT;
                        if (ct->status & IPS_DST_NAT)
                                state |= OVS_CS_F_DST_NAT;
                }
                zone = nf_ct_zone(ct);
        } else if (post_ct) {
                state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
                if (info)
                        zone = &info->zone;
        }
        __ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the
 * local stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb,
                     struct sw_flow_key *key,
                     bool post_ct)
{
        ovs_ct_update_key(skb, NULL, key, post_ct, false);
}

int ovs_ct_put_key(const struct sw_flow_key *swkey,
                   const struct sw_flow_key *output, struct sk_buff *skb)
{
        if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
                return -EMSGSIZE;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
                return -EMSGSIZE;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
            nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
                return -EMSGSIZE;

        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
            nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
                    &output->ct.labels))
                return -EMSGSIZE;

        if (swkey->ct_orig_proto) {
                if (swkey->eth.type == htons(ETH_P_IP)) {
                        struct ovs_key_ct_tuple_ipv4 orig;

                        memset(&orig, 0, sizeof(orig));
                        orig.ipv4_src = output->ipv4.ct_orig.src;
                        orig.ipv4_dst = output->ipv4.ct_orig.dst;
                        orig.src_port = output->ct.orig_tp.src;
                        orig.dst_port = output->ct.orig_tp.dst;
                        orig.ipv4_proto = output->ct_orig_proto;

                        if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
                                    sizeof(orig), &orig))
                                return -EMSGSIZE;
                } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                        struct ovs_key_ct_tuple_ipv6 orig;

                        memset(&orig, 0, sizeof(orig));
                        memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
                               sizeof(orig.ipv6_src));
                        memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
                               sizeof(orig.ipv6_dst));
                        orig.src_port = output->ct.orig_tp.src;
                        orig.dst_port = output->ct.orig_tp.dst;
                        orig.ipv6_proto = output->ct_orig_proto;

                        if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
                                    sizeof(orig), &orig))
                                return -EMSGSIZE;
                }
        }

        return 0;
}

static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
                           u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
        u32 new_mark;

        new_mark = ct_mark | (ct->mark & ~(mask));
        if (ct->mark != new_mark) {
                ct->mark = new_mark;
                if (nf_ct_is_confirmed(ct))
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                key->ct.mark = new_mark;
        }

        return 0;
#else
        return -ENOTSUPP;
#endif
}

static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
        struct nf_conn_labels *cl;

        cl = nf_ct_labels_find(ct);
        if (!cl) {
                nf_ct_labels_ext_add(ct);
                cl = nf_ct_labels_find(ct);
        }

        return cl;
}

/* Initialize labels for a new, yet to be committed conntrack entry.  Note
 * that since the new connection is not yet confirmed, and thus no-one else
 * has access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
                              const struct ovs_key_ct_labels *labels,
                              const struct ovs_key_ct_labels *mask)
{
        struct nf_conn_labels *cl, *master_cl;
        bool have_mask = labels_nonzero(mask);

        /* Inherit master's labels to the related connection? */
        master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

        if (!master_cl && !have_mask)
                return 0;   /* Nothing to do. */

        cl = ovs_ct_get_conn_labels(ct);
        if (!cl)
                return -ENOSPC;

        /* Inherit the master's labels, if any. */
        if (master_cl)
                *cl = *master_cl;

        if (have_mask) {
                u32 *dst = (u32 *)cl->bits;
                int i;

                for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
                        dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
                                (labels->ct_labels_32[i]
                                 & mask->ct_labels_32[i]);
        }

        /* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
         * IPCT_LABEL bit is set in the event cache.
         */
        nf_conntrack_event_cache(IPCT_LABEL, ct);

        memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

        return 0;
}
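
/* The masked write above computes, per 32-bit word,
 *
 *      dst = (dst & ~mask) | (value & mask)
 *
 * e.g. dst = 0x000000ff, value = 0x12340000, mask = 0xffff0000 yields
 * 0x123400ff: only the bits selected by the mask are replaced, the rest
 * (possibly inherited from the master connection) are preserved.
 */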

static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
                             const struct ovs_key_ct_labels *labels,
                             const struct ovs_key_ct_labels *mask)
{
        struct nf_conn_labels *cl;
        int err;

        cl = ovs_ct_get_conn_labels(ct);
        if (!cl)
                return -ENOSPC;

        err = nf_connlabels_replace(ct, labels->ct_labels_32,
                                    mask->ct_labels_32,
                                    OVS_CT_LABELS_LEN_32);
        if (err)
                return err;

        memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

        return 0;
}

/* 'skb' should already be pulled to nh_ofs. */
static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
{
        const struct nf_conntrack_helper *helper;
        const struct nf_conn_help *help;
        enum ip_conntrack_info ctinfo;
        unsigned int protoff;
        struct nf_conn *ct;
        int err;

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct || ctinfo == IP_CT_RELATED_REPLY)
                return NF_ACCEPT;

        help = nfct_help(ct);
        if (!help)
                return NF_ACCEPT;

        helper = rcu_dereference(help->helper);
        if (!helper)
                return NF_ACCEPT;

        switch (proto) {
        case NFPROTO_IPV4:
                protoff = ip_hdrlen(skb);
                break;
        case NFPROTO_IPV6: {
                u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                __be16 frag_off;
                int ofs;

                ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
                                       &frag_off);
                if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
                        pr_debug("proto header not found\n");
                        return NF_ACCEPT;
                }
                protoff = ofs;
                break;
        }
        default:
                WARN_ONCE(1, "helper invoked on non-IP family!");
                return NF_DROP;
        }

        err = helper->help(skb, protoff, ct, ctinfo);
        if (err != NF_ACCEPT)
                return err;

        /* Adjust seqs after helper.  This is needed due to some helpers (e.g.,
         * FTP with NAT) adjusting the TCP payload size when mangling IP
         * addresses and/or port numbers in the text-based control connection.
         */
        if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
            !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
                return NF_DROP;
        return NF_ACCEPT;
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
        int err;

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(net, skb, user);
                if (err)
                        return err;

                ovs_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err) {
                        if (err != -EINPROGRESS)
                                kfree_skb(skb);
                        return err;
                }

                key->ip.proto = ipv6_hdr(skb)->nexthdr;
                ovs_cb.mru = IP6CB(skb)->frag_max_size;
#endif
        } else {
                kfree_skb(skb);
                return -EPFNOSUPPORT;
        }

        /* The key extracted from the fragment that completed this datagram
         * likely didn't have an L4 header, so regenerate it.
         */
        ovs_flow_key_update_l3l4(skb, key);

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_CB(skb) = ovs_cb;

        return 0;
}
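
/* The conntrack zone is folded into the defrag "user" id above
 * (IP_DEFRAG_CONNTRACK_IN + zone, and the IPv6 equivalent), and the user
 * id is part of the reassembly queue key, so fragments seen in different
 * zones are never reassembled into the same datagram even if their
 * (saddr, daddr, id) fields collide.
 */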

static struct nf_conntrack_expect *
ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
                   u16 proto, const struct sk_buff *skb)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
                return NULL;

        exp = __nf_ct_expect_find(net, zone, &tuple);
        if (exp) {
                struct nf_conntrack_tuple_hash *h;

                /* Delete existing conntrack entry, if it clashes with the
                 * expectation.  This can happen since conntrack ALGs do not
                 * check for clashes between (new) expectations and existing
                 * conntrack entries.  nf_conntrack_in() will check the
                 * expectations only if a conntrack entry can not be found,
                 * which can lead to OVS finding the expectation (here) in the
                 * init direction, but which will not be removed by the
                 * nf_conntrack_in() call, if a matching conntrack entry is
                 * found instead.  In this case all init direction packets
                 * would be reported as new related packets, while reply
                 * direction packets would be reported as unrelated
                 * established packets.
                 */
                h = nf_conntrack_find_get(net, zone, &tuple);
                if (h) {
                        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

                        nf_ct_delete(ct, 0, 0);
                        nf_ct_put(ct);
                }
        }

        return exp;
}

/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
        const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
                return IP_CT_ESTABLISHED_REPLY;
        /* Once we've had two-way comms, always ESTABLISHED. */
        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                return IP_CT_ESTABLISHED;
        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
                return IP_CT_RELATED;
        return IP_CT_NEW;
}

/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state.  This allows
 * an skb->_nfct lost due to an upcall to be recovered during actions
 * execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection.  Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
                     u8 l3num, struct sk_buff *skb, bool natted)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
                               net, &tuple)) {
                pr_debug("ovs_ct_find_existing: Can't get tuple\n");
                return NULL;
        }

        /* Must invert the tuple if skb has been transformed by NAT. */
        if (natted) {
                struct nf_conntrack_tuple inverse;

                if (!nf_ct_invert_tuple(&inverse, &tuple)) {
                        pr_debug("ovs_ct_find_existing: Inversion failed!\n");
                        return NULL;
                }
                tuple = inverse;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(net, zone, &tuple);
        if (!h)
                return NULL;   /* Not found. */

        ct = nf_ct_tuplehash_to_ctrack(h);

        /* Inverted packet tuple matches the reverse direction conntrack tuple,
         * select the other tuplehash to get the right 'ctinfo' bits for this
         * packet.
         */
        if (natted)
                h = &ct->tuplehash[!h->tuple.dst.dir];

        nf_ct_set(skb, ct, ovs_ct_get_info(h));
        return ct;
}
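
/* Example: with SNAT 10.0.0.1 -> 192.0.2.1 already applied to the skb,
 * the tuple extracted from the packet no longer matches the conntrack
 * ORIGINAL tuple.  Inverting it (natted == true) yields the stored REPLY
 * tuple, the lookup succeeds, and the tuplehash is then flipped back so
 * that ovs_ct_get_info() reports ctinfo for the packet's real direction.
 */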

static
struct nf_conn *ovs_ct_executed(struct net *net,
                                const struct sw_flow_key *key,
                                const struct ovs_conntrack_info *info,
                                struct sk_buff *skb,
                                bool *ct_executed)
{
        struct nf_conn *ct = NULL;

        /* If no ct, check if we have evidence that an existing conntrack entry
         * might be found for this skb.  This happens when we lose a skb->_nfct
         * due to an upcall, or if the direction is being forced.  If the
         * connection was not confirmed, it is not cached and needs to be run
         * through conntrack again.
         */
        *ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
                       !(key->ct_state & OVS_CS_F_INVALID) &&
                       (key->ct_zone == info->zone.id);

        if (*ct_executed || (!key->ct_state && info->force)) {
                ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
                                          !!(key->ct_state &
                                             OVS_CS_F_NAT_MASK));
        }

        return ct;
}

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
                            const struct sw_flow_key *key,
                            const struct ovs_conntrack_info *info,
                            struct sk_buff *skb)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        bool ct_executed = true;

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
                ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

        if (ct)
                nf_ct_get(skb, &ctinfo);
        else
                return false;

        if (!net_eq(net, read_pnet(&ct->ct_net)))
                return false;
        if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
                return false;
        if (info->helper) {
                struct nf_conn_help *help;

                help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
                if (help && rcu_access_pointer(help->helper) != info->helper)
                        return false;
        }
        if (info->nf_ct_timeout) {
                struct nf_conn_timeout *timeout_ext;

                timeout_ext = nf_ct_timeout_find(ct);
                if (!timeout_ext || info->nf_ct_timeout !=
                    rcu_dereference(timeout_ext->timeout))
                        return false;
        }
        /* Force conntrack entry direction to the current packet? */
        if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
                /* Delete the conntrack entry if confirmed, else just release
                 * the reference.
                 */
                if (nf_ct_is_confirmed(ct))
                        nf_ct_delete(ct, 0, 0);

                nf_ct_put(ct);
                nf_ct_set(skb, NULL, 0);
                return false;
        }

        return ct_executed;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static void ovs_nat_update_key(struct sw_flow_key *key,
                               const struct sk_buff *skb,
                               enum nf_nat_manip_type maniptype)
{
        if (maniptype == NF_NAT_MANIP_SRC) {
                __be16 src;

                key->ct_state |= OVS_CS_F_SRC_NAT;
                if (key->eth.type == htons(ETH_P_IP))
                        key->ipv4.addr.src = ip_hdr(skb)->saddr;
                else if (key->eth.type == htons(ETH_P_IPV6))
                        memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
                               sizeof(key->ipv6.addr.src));
                else
                        return;

                if (key->ip.proto == IPPROTO_UDP)
                        src = udp_hdr(skb)->source;
                else if (key->ip.proto == IPPROTO_TCP)
                        src = tcp_hdr(skb)->source;
                else if (key->ip.proto == IPPROTO_SCTP)
                        src = sctp_hdr(skb)->source;
                else
                        return;

                key->tp.src = src;
        } else {
                __be16 dst;

                key->ct_state |= OVS_CS_F_DST_NAT;
                if (key->eth.type == htons(ETH_P_IP))
                        key->ipv4.addr.dst = ip_hdr(skb)->daddr;
                else if (key->eth.type == htons(ETH_P_IPV6))
                        memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(key->ipv6.addr.dst));
                else
                        return;

                if (key->ip.proto == IPPROTO_UDP)
                        dst = udp_hdr(skb)->dest;
                else if (key->ip.proto == IPPROTO_TCP)
                        dst = tcp_hdr(skb)->dest;
                else if (key->ip.proto == IPPROTO_SCTP)
                        dst = sctp_hdr(skb)->dest;
                else
                        return;

                key->tp.dst = dst;
        }
}

/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                              enum ip_conntrack_info ctinfo,
                              const struct nf_nat_range2 *range,
                              enum nf_nat_manip_type maniptype, struct sw_flow_key *key)
{
        int hooknum, nh_off, err = NF_ACCEPT;

        nh_off = skb_network_offset(skb);
        skb_pull_rcsum(skb, nh_off);

        /* See HOOK2MANIP(). */
        if (maniptype == NF_NAT_MANIP_SRC)
                hooknum = NF_INET_LOCAL_IN; /* Source NAT */
        else
                hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
                if (IS_ENABLED(CONFIG_NF_NAT) &&
                    skb->protocol == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto push;
                } else if (IS_ENABLED(CONFIG_IPV6) &&
                           skb->protocol == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
                                                      sizeof(struct ipv6hdr),
                                                      &nexthdr, &frag_off);

                        if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
                                if (!nf_nat_icmpv6_reply_translation(skb, ct,
                                                                     ctinfo,
                                                                     hooknum,
                                                                     hdrlen))
                                        err = NF_DROP;
                                goto push;
                        }
                }
                /* Non-ICMP, fall thru to initialize if needed. */
                fallthrough;
        case IP_CT_NEW:
                /* Seen it before?  This can happen for loopback, retrans,
                 * or local packets.
                 */
                if (!nf_nat_initialized(ct, maniptype)) {
                        /* Initialize according to the NAT action. */
                        err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
                                /* Action is set up to establish a new
                                 * mapping.
                                 */
                                ? nf_nat_setup_info(ct, range, maniptype)
                                : nf_nat_alloc_null_binding(ct, hooknum);
                        if (err != NF_ACCEPT)
                                goto push;
                }
                break;

        case IP_CT_ESTABLISHED:
        case IP_CT_ESTABLISHED_REPLY:
                break;

        default:
                err = NF_DROP;
                goto push;
        }

        err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
        skb_push_rcsum(skb, nh_off);

        /* Update the flow key if NAT successful. */
        if (err == NF_ACCEPT)
                ovs_nat_update_key(key, skb, maniptype);

        return err;
}

/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
                      const struct ovs_conntrack_info *info,
                      struct sk_buff *skb, struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo)
{
        enum nf_nat_manip_type maniptype;
        int err;

        /* Add NAT extension if not confirmed yet. */
        if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
                return NF_ACCEPT;   /* Can't NAT. */

        /* Determine NAT type.
         * Check if the NAT type can be deduced from the tracked connection.
         * Make sure new expected connections (IP_CT_RELATED) are NATted only
         * when committing.
         */
        if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
            ct->status & IPS_NAT_MASK &&
            (ctinfo != IP_CT_RELATED || info->commit)) {
                /* NAT an established or related connection like before. */
                if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
                        /* This is the REPLY direction for a connection
                         * for which NAT was applied in the forward
                         * direction.  Do the reverse NAT.
                         */
                        maniptype = ct->status & IPS_SRC_NAT
                                ? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
                else
                        maniptype = ct->status & IPS_SRC_NAT
                                ? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
        } else if (info->nat & OVS_CT_SRC_NAT) {
                maniptype = NF_NAT_MANIP_SRC;
        } else if (info->nat & OVS_CT_DST_NAT) {
                maniptype = NF_NAT_MANIP_DST;
        } else {
                return NF_ACCEPT; /* Connection is not NATed. */
        }
        err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype, key);

        if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
                if (ct->status & IPS_SRC_NAT) {
                        if (maniptype == NF_NAT_MANIP_SRC)
                                maniptype = NF_NAT_MANIP_DST;
                        else
                                maniptype = NF_NAT_MANIP_SRC;

                        err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
                                                 maniptype, key);
                } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
                        err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
                                                 NF_NAT_MANIP_SRC, key);
                }
        }

        return err;
}
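
/* The second ovs_ct_nat_execute() pass above covers connections that
 * have both IPS_SRC_NAT and IPS_DST_NAT set: the first call handles one
 * manip type and the second call applies the other, so a single ct(nat)
 * action translates both the source and the destination address.
 */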

#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
                      const struct ovs_conntrack_info *info,
                      struct sk_buff *skb, struct nf_conn *ct,
                      enum ip_conntrack_info ctinfo)
{
        return NF_ACCEPT;
}
#endif

/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already.  Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                           const struct ovs_conntrack_info *info,
                           struct sk_buff *skb)
{
        /* If we are recirculating packets to match on conntrack fields and
         * committing with a separate conntrack action, then we don't need to
         * actually run the packet through conntrack twice unless it's for a
         * different zone.
         */
        bool cached = skb_nfct_cached(net, key, info, skb);
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        if (!cached) {
                struct nf_hook_state state = {
                        .hook = NF_INET_PRE_ROUTING,
                        .pf = info->family,
                        .net = net,
                };
                struct nf_conn *tmpl = info->ct;
                int err;

                /* Associate skb with specified zone. */
                if (tmpl) {
                        ct = nf_ct_get(skb, &ctinfo);
                        nf_ct_put(ct);
                        nf_conntrack_get(&tmpl->ct_general);
                        nf_ct_set(skb, tmpl, IP_CT_NEW);
                }

                err = nf_conntrack_in(skb, &state);
                if (err != NF_ACCEPT)
                        return -ENOENT;

                /* Clear CT state NAT flags to mark that we have not yet done
                 * NAT after the nf_conntrack_in() call.  We can actually clear
                 * the whole state, as it will be re-initialized below.
                 */
                key->ct_state = 0;

                /* Update the key, but keep the NAT flags. */
                ovs_ct_update_key(skb, info, key, true, true);
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                bool add_helper = false;

                /* Packets starting a new connection must be NATted before the
                 * helper, so that the helper knows about the NAT.  We enforce
                 * this by delaying both NAT and helper calls for unconfirmed
                 * connections until the committing CT action.  For later
                 * packets NAT and Helper may be called in either order.
                 *
                 * NAT will be done only if the CT action has NAT, and only
                 * once per packet (per zone), as guarded by the NAT bits in
                 * the key->ct_state.
                 */
                if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
                    (nf_ct_is_confirmed(ct) || info->commit) &&
                    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
                        return -EINVAL;
                }

                /* Userspace may decide to perform a ct lookup without a helper
                 * specified followed by a (recirculate and) commit with one,
                 * or attach a helper in a later commit.  Therefore, for
                 * connections which we will commit, we may need to attach
                 * the helper here.
                 */
                if (info->commit && info->helper && !nfct_help(ct)) {
                        int err = __nf_ct_try_assign_helper(ct, info->ct,
                                                            GFP_ATOMIC);
                        if (err)
                                return err;
                        add_helper = true;

                        /* helper installed, add seqadj if NAT is required */
                        if (info->nat && !nfct_seqadj(ct)) {
                                if (!nfct_seqadj_ext_add(ct))
                                        return -EINVAL;
                        }
                }

                /* Call the helper only if:
                 * - nf_conntrack_in() was executed above ("!cached") or a
                 *   helper was just attached ("add_helper") for a confirmed
                 *   connection, or
                 * - When committing an unconfirmed connection.
                 */
                if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
                                              info->commit) &&
                    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
                        return -EINVAL;
                }

                if (nf_ct_protonum(ct) == IPPROTO_TCP &&
                    nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
                        /* Be liberal for tcp packets so that out-of-window
                         * packets are not marked invalid.
                         */
                        nf_ct_set_tcp_be_liberal(ct);
                }

                nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
        }

        return 0;
}

/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                         const struct ovs_conntrack_info *info,
                         struct sk_buff *skb)
{
        struct nf_conntrack_expect *exp;

        /* If we pass an expected packet through nf_conntrack_in() the
         * expectation is typically removed, but the packet could still be
         * lost in upcall processing.  To prevent this from happening we
         * perform an explicit expectation lookup.  Expected connections are
         * always new, and will be passed through conntrack only when they are
         * committed, as it is OK to remove the expectation at that time.
         */
        exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
        if (exp) {
                u8 state;

                /* NOTE: New connections are NATted and Helped only when
                 * committed, so we are not calling into NAT here.
                 */
                state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
                __ovs_ct_update_key(key, state, &info->zone, exp->master);
        } else {
                struct nf_conn *ct;
                int err;

                err = __ovs_ct_lookup(net, key, info, skb);
                if (err)
                        return err;

                ct = (struct nf_conn *)skb_nfct(skb);
                if (ct)
                        nf_ct_deliver_cached_events(ct);
        }

        return 0;
}

static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
        size_t i;

        for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
                if (labels->ct_labels_32[i])
                        return true;

        return false;
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
        const struct ovs_ct_limit_info *info, u16 zone)
{
        return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}
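
/* CT_LIMIT_HASH_BUCKETS is a power of two, so the mask above is a cheap
 * modulo: e.g. zone 513 hashes to bucket 513 & 511 == 1.
 */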

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
                         struct ovs_ct_limit *new_ct_limit)
{
        struct ovs_ct_limit *ct_limit;
        struct hlist_head *head;

        head = ct_limit_hash_bucket(info, new_ct_limit->zone);
        hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
                if (ct_limit->zone == new_ct_limit->zone) {
                        hlist_replace_rcu(&ct_limit->hlist_node,
                                          &new_ct_limit->hlist_node);
                        kfree_rcu(ct_limit, rcu);
                        return;
                }
        }

        hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}

/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
        struct ovs_ct_limit *ct_limit;
        struct hlist_head *head;
        struct hlist_node *n;

        head = ct_limit_hash_bucket(info, zone);
        hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
                if (ct_limit->zone == zone) {
                        hlist_del_rcu(&ct_limit->hlist_node);
                        kfree_rcu(ct_limit, rcu);
                        return;
                }
        }
}

/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
        struct ovs_ct_limit *ct_limit;
        struct hlist_head *head;

        head = ct_limit_hash_bucket(info, zone);
        hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
                if (ct_limit->zone == zone)
                        return ct_limit->limit;
        }

        return info->default_limit;
}

static int ovs_ct_check_limit(struct net *net,
                              const struct ovs_conntrack_info *info,
                              const struct nf_conntrack_tuple *tuple)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
        const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
        u32 per_zone_limit, connections;
        u32 conncount_key;

        conncount_key = info->zone.id;

        per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
        if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
                return 0;

        connections = nf_conncount_count(net, ct_limit_info->data,
                                         &conncount_key, tuple, &info->zone);
        if (connections > per_zone_limit)
                return -ENOMEM;

        return 0;
}
#endif
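
/* Example: with a per-zone limit of 10, an nf_conncount_count() result
 * of 11 makes ovs_ct_check_limit() return -ENOMEM; ovs_ct_commit() then
 * fails the commit and the packet is freed in ovs_ct_execute().
 */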

/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                         const struct ovs_conntrack_info *info,
                         struct sk_buff *skb)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        int err;

        err = __ovs_ct_lookup(net, key, info, skb);
        if (err)
                return err;

        /* The connection could be invalid, in which case this is a no-op. */
        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
                return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
        if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
                if (!nf_ct_is_confirmed(ct)) {
                        err = ovs_ct_check_limit(net, info,
                                &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                        if (err) {
                                net_warn_ratelimited("openvswitch: zone: %u "
                                        "exceeds conntrack limit\n",
                                        info->zone.id);
                                return err;
                        }
                }
        }
#endif

        /* Set the conntrack event mask if given.  NEW and DELETE events have
         * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
         * typically would receive many kinds of updates.  Setting the event
         * mask allows those events to be filtered.  The set event mask will
         * remain in effect for the lifetime of the connection unless changed
         * by a further CT action with both the commit flag and the eventmask
         * option. */
        if (info->have_eventmask) {
                struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

                if (cache)
                        cache->ctmask = info->eventmask;
        }

        /* Apply changes before confirming the connection so that the initial
         * conntrack NEW netlink event carries the values given in the CT
         * action.
         */
        if (info->mark.mask) {
                err = ovs_ct_set_mark(ct, key, info->mark.value,
                                      info->mark.mask);
                if (err)
                        return err;
        }
        if (!nf_ct_is_confirmed(ct)) {
                err = ovs_ct_init_labels(ct, key, &info->labels.value,
                                         &info->labels.mask);
                if (err)
                        return err;

                nf_conn_act_ct_ext_add(ct);
        } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
                   labels_nonzero(&info->labels.mask)) {
                err = ovs_ct_set_labels(ct, key, &info->labels.value,
                                        &info->labels.mask);
                if (err)
                        return err;
        }
        /* This will take care of sending queued events even if the connection
         * is already confirmed.
         */
        if (nf_conntrack_confirm(skb) != NF_ACCEPT)
                return -EINVAL;

        return 0;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding.  This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum).  The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
        unsigned int len;
        int err;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                len = ntohs(ip_hdr(skb)->tot_len);
                break;
        case htons(ETH_P_IPV6):
                len = sizeof(struct ipv6hdr)
                        + ntohs(ipv6_hdr(skb)->payload_len);
                break;
        default:
                len = skb->len;
        }

        err = pskb_trim_rcsum(skb, len);
        if (err)
                kfree_skb(skb);

        return err;
}
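
/* Example: a minimum-size Ethernet frame carrying a 40-byte IPv4
 * datagram arrives with a 46-byte L3 payload; tot_len says 40, so the
 * six bytes of trailing padding are trimmed here before conntrack
 * checksums the L4 payload.
 */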

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                   struct sw_flow_key *key,
                   const struct ovs_conntrack_info *info)
{
        int nh_ofs;
        int err;

        /* The conntrack module expects to be working at L3. */
        nh_ofs = skb_network_offset(skb);
        skb_pull_rcsum(skb, nh_ofs);

        err = ovs_skb_network_trim(skb);
        if (err)
                return err;

        if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
                err = handle_fragments(net, key, info->zone.id, skb);
                if (err)
                        return err;
        }

        if (info->commit)
                err = ovs_ct_commit(net, key, info, skb);
        else
                err = ovs_ct_lookup(net, key, info, skb);

        skb_push_rcsum(skb, nh_ofs);
        if (err)
                kfree_skb(skb);
        return err;
}

int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        ct = nf_ct_get(skb, &ctinfo);

        nf_ct_put(ct);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

        if (key)
                ovs_ct_fill_key(skb, key, false);

        return 0;
}

static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
                             const struct sw_flow_key *key, bool log)
{
        struct nf_conntrack_helper *helper;
        struct nf_conn_help *help;
        int ret = 0;

        helper = nf_conntrack_helper_try_module_get(name, info->family,
                                                    key->ip.proto);
        if (!helper) {
                OVS_NLERR(log, "Unknown helper \"%s\"", name);
                return -EINVAL;
        }

        help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
        if (!help) {
                nf_conntrack_helper_put(helper);
                return -ENOMEM;
        }

#if IS_ENABLED(CONFIG_NF_NAT)
        if (info->nat) {
                ret = nf_nat_helper_try_module_get(name, info->family,
                                                   key->ip.proto);
                if (ret) {
                        nf_conntrack_helper_put(helper);
                        OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
                                  name, ret);
                        return ret;
                }
        }
#endif
        rcu_assign_pointer(help->helper, helper);
        info->helper = helper;
        return ret;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
                     struct ovs_conntrack_info *info, bool log)
{
        struct nlattr *a;
        int rem;
        bool have_ip_max = false;
        bool have_proto_max = false;
        bool ip_vers = (info->family == NFPROTO_IPV6);

        nla_for_each_nested(a, attr, rem) {
                static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
                        [OVS_NAT_ATTR_SRC] = {0, 0},
                        [OVS_NAT_ATTR_DST] = {0, 0},
                        [OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
                                                 sizeof(struct in6_addr)},
                        [OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
                                                 sizeof(struct in6_addr)},
                        [OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
                        [OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
                        [OVS_NAT_ATTR_PERSISTENT] = {0, 0},
                        [OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
                        [OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
                };
                int type = nla_type(a);

                if (type > OVS_NAT_ATTR_MAX) {
                        OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
                                  type, OVS_NAT_ATTR_MAX);
                        return -EINVAL;
                }

                if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
                        OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
                                  type, nla_len(a),
                                  ovs_nat_attr_lens[type][ip_vers]);
                        return -EINVAL;
                }

                switch (type) {
                case OVS_NAT_ATTR_SRC:
                case OVS_NAT_ATTR_DST:
                        if (info->nat) {
                                OVS_NLERR(log, "Only one type of NAT may be specified");
                                return -ERANGE;
                        }
                        info->nat |= OVS_CT_NAT;
                        info->nat |= ((type == OVS_NAT_ATTR_SRC)
                                        ? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
                        break;

                case OVS_NAT_ATTR_IP_MIN:
                        nla_memcpy(&info->range.min_addr, a,
                                   sizeof(info->range.min_addr));
                        info->range.flags |= NF_NAT_RANGE_MAP_IPS;
                        break;

                case OVS_NAT_ATTR_IP_MAX:
                        have_ip_max = true;
                        nla_memcpy(&info->range.max_addr, a,
                                   sizeof(info->range.max_addr));
                        info->range.flags |= NF_NAT_RANGE_MAP_IPS;
                        break;

                case OVS_NAT_ATTR_PROTO_MIN:
                        info->range.min_proto.all = htons(nla_get_u16(a));
                        info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
                        break;

                case OVS_NAT_ATTR_PROTO_MAX:
                        have_proto_max = true;
                        info->range.max_proto.all = htons(nla_get_u16(a));
                        info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
                        break;

                case OVS_NAT_ATTR_PERSISTENT:
                        info->range.flags |= NF_NAT_RANGE_PERSISTENT;
                        break;

                case OVS_NAT_ATTR_PROTO_HASH:
                        info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
                        break;

                case OVS_NAT_ATTR_PROTO_RANDOM:
                        info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
                        break;

                default:
                        OVS_NLERR(log, "Unknown nat attribute (%d)", type);
                        return -EINVAL;
                }
        }

        if (rem > 0) {
                OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
                return -EINVAL;
        }
        if (!info->nat) {
                /* Do not allow flags if no type is given. */
                if (info->range.flags) {
                        OVS_NLERR(log,
                                  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
                                  );
                        return -EINVAL;
                }
                info->nat = OVS_CT_NAT;   /* NAT existing connections. */
        } else if (!info->commit) {
                OVS_NLERR(log,
                          "NAT attributes may be specified only when CT COMMIT flag is also specified."
                          );
                return -EINVAL;
        }
        /* Allow missing IP_MAX. */
        if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
                memcpy(&info->range.max_addr, &info->range.min_addr,
                       sizeof(info->range.max_addr));
        }
        /* Allow missing PROTO_MAX. */
        if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
            !have_proto_max) {
                info->range.max_proto.all = info->range.min_proto.all;
        }
        return 0;
}
#endif
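
/* Example: a NAT action carrying only OVS_NAT_ATTR_IP_MIN = 10.0.0.1
 * ends up with range.min_addr == range.max_addr == 10.0.0.1 thanks to
 * the missing-IP_MAX fallback above, i.e. a single-address NAT pool.
 */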

static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
        [OVS_CT_ATTR_COMMIT]       = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_ZONE]         = { .minlen = sizeof(u16),
                                       .maxlen = sizeof(u16) },
        [OVS_CT_ATTR_MARK]         = { .minlen = sizeof(struct md_mark),
                                       .maxlen = sizeof(struct md_mark) },
        [OVS_CT_ATTR_LABELS]       = { .minlen = sizeof(struct md_labels),
                                       .maxlen = sizeof(struct md_labels) },
        [OVS_CT_ATTR_HELPER]       = { .minlen = 1,
                                       .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
        /* NAT length is checked when parsing the nested attributes. */
        [OVS_CT_ATTR_NAT]          = { .minlen = 0, .maxlen = INT_MAX },
#endif
        [OVS_CT_ATTR_EVENTMASK]    = { .minlen = sizeof(u32),
                                       .maxlen = sizeof(u32) },
        [OVS_CT_ATTR_TIMEOUT]      = { .minlen = 1,
                                       .maxlen = CTNL_TIMEOUT_NAME_MAX },
};

static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                    const char **helper, bool log)
{
        struct nlattr *a;
        int rem;

        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                int maxlen;
                int minlen;

                if (type > OVS_CT_ATTR_MAX) {
                        OVS_NLERR(log,
                                  "Unknown conntrack attr (type=%d, max=%d)",
                                  type, OVS_CT_ATTR_MAX);
                        return -EINVAL;
                }

                maxlen = ovs_ct_attr_lens[type].maxlen;
                minlen = ovs_ct_attr_lens[type].minlen;
                if (nla_len(a) < minlen || nla_len(a) > maxlen) {
                        OVS_NLERR(log,
                                  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
                                  type, nla_len(a), maxlen);
                        return -EINVAL;
                }

                switch (type) {
                case OVS_CT_ATTR_FORCE_COMMIT:
                        info->force = true;
                        fallthrough;
                case OVS_CT_ATTR_COMMIT:
                        info->commit = true;
                        break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
                case OVS_CT_ATTR_ZONE:
                        info->zone.id = nla_get_u16(a);
                        break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
                case OVS_CT_ATTR_MARK: {
                        struct md_mark *mark = nla_data(a);

                        if (!mark->mask) {
                                OVS_NLERR(log, "ct_mark mask cannot be 0");
                                return -EINVAL;
                        }
                        info->mark = *mark;
                        break;
                }
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
                case OVS_CT_ATTR_LABELS: {
                        struct md_labels *labels = nla_data(a);

                        if (!labels_nonzero(&labels->mask)) {
                                OVS_NLERR(log, "ct_labels mask cannot be 0");
                                return -EINVAL;
                        }
                        info->labels = *labels;
                        break;
                }
#endif
                case OVS_CT_ATTR_HELPER:
                        *helper = nla_data(a);
                        if (!memchr(*helper, '\0', nla_len(a))) {
                                OVS_NLERR(log, "Invalid conntrack helper");
                                return -EINVAL;
                        }
                        break;
#if IS_ENABLED(CONFIG_NF_NAT)
                case OVS_CT_ATTR_NAT: {
                        int err = parse_nat(a, info, log);

                        if (err)
                                return err;
                        break;
                }
#endif
                case OVS_CT_ATTR_EVENTMASK:
                        info->have_eventmask = true;
                        info->eventmask = nla_get_u32(a);
                        break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
                case OVS_CT_ATTR_TIMEOUT:
                        memcpy(info->timeout, nla_data(a), nla_len(a));
                        if (!memchr(info->timeout, '\0', nla_len(a))) {
                                OVS_NLERR(log, "Invalid conntrack timeout");
                                return -EINVAL;
                        }
                        break;
#endif

                default:
                        OVS_NLERR(log, "Unknown conntrack attr (%d)",
                                  type);
                        return -EINVAL;
                }
        }

#ifdef CONFIG_NF_CONNTRACK_MARK
        if (!info->commit && info->mark.mask) {
                OVS_NLERR(log,
                          "Setting conntrack mark requires 'commit' flag.");
                return -EINVAL;
        }
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
        if (!info->commit && labels_nonzero(&info->labels.mask)) {
                OVS_NLERR(log,
                          "Setting conntrack labels requires 'commit' flag.");
                return -EINVAL;
        }
#endif
        if (rem > 0) {
                OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
                return -EINVAL;
        }

        return 0;
}

bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
        if (attr == OVS_KEY_ATTR_CT_STATE)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            attr == OVS_KEY_ATTR_CT_ZONE)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
            attr == OVS_KEY_ATTR_CT_MARK)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
            attr == OVS_KEY_ATTR_CT_LABELS) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

                return ovs_net->xt_label;
        }

        return false;
}

int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                       const struct sw_flow_key *key,
                       struct sw_flow_actions **sfa, bool log)
{
        struct ovs_conntrack_info ct_info;
        const char *helper = NULL;
        u16 family;
        int err;

        family = key_to_nfproto(key);
        if (family == NFPROTO_UNSPEC) {
                OVS_NLERR(log, "ct family unspecified");
                return -EINVAL;
        }

        memset(&ct_info, 0, sizeof(ct_info));
        ct_info.family = family;

        nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
                        NF_CT_DEFAULT_ZONE_DIR, 0);

        err = parse_ct(attr, &ct_info, &helper, log);
        if (err)
                return err;

        /* Set up template for tracking connections in specific zones. */
        ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
        if (!ct_info.ct) {
                OVS_NLERR(log, "Failed to allocate conntrack template");
                return -ENOMEM;
        }

        if (ct_info.timeout[0]) {
                if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
                                      ct_info.timeout))
                        pr_info_ratelimited("Failed to associate timeout "
                                            "policy `%s'\n", ct_info.timeout);
                else
                        ct_info.nf_ct_timeout = rcu_dereference(
                                nf_ct_timeout_find(ct_info.ct)->timeout);

        }

        if (helper) {
                err = ovs_ct_add_helper(&ct_info, helper, key, log);
                if (err)
                        goto err_free_ct;
        }

        err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
                                 sizeof(ct_info), log);
        if (err)
                goto err_free_ct;

        __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
        return 0;
err_free_ct:
        __ovs_ct_free_action(&ct_info);
        return err;
}
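
/* The template is marked IPS_CONFIRMED above, presumably so that
 * conntrack treats it as a fixed, already-confirmed entry and never
 * tries to confirm or insert the template itself; it only seeds the
 * zone for packets pushed through nf_conntrack_in().
 */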

#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
                               struct sk_buff *skb)
{
        struct nlattr *start;

        start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
        if (!start)
                return false;

        if (info->nat & OVS_CT_SRC_NAT) {
                if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
                        return false;
        } else if (info->nat & OVS_CT_DST_NAT) {
                if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
                        return false;
        } else {
                goto out;
        }

        if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
                if (IS_ENABLED(CONFIG_NF_NAT) &&
                    info->family == NFPROTO_IPV4) {
                        if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
                                            info->range.min_addr.ip) ||
                            (info->range.max_addr.ip
                             != info->range.min_addr.ip &&
                             (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
                                              info->range.max_addr.ip))))
                                return false;
                } else if (IS_ENABLED(CONFIG_IPV6) &&
                           info->family == NFPROTO_IPV6) {
                        if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
                                             &info->range.min_addr.in6) ||
                            (memcmp(&info->range.max_addr.in6,
                                    &info->range.min_addr.in6,
                                    sizeof(info->range.max_addr.in6)) &&
                             (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
                                               &info->range.max_addr.in6))))
                                return false;
                } else {
                        return false;
                }
        }
        if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
            (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
                         ntohs(info->range.min_proto.all)) ||
             (info->range.max_proto.all != info->range.min_proto.all &&
              nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
                          ntohs(info->range.max_proto.all)))))
                return false;

        if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
            nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
                return false;
        if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
            nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
                return false;
        if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
            nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
                return false;
out:
        nla_nest_end(skb, start);

        return true;
}
#endif
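
/* Serialization mirrors parse_nat()'s defaults: OVS_NAT_ATTR_IP_MAX and
 * OVS_NAT_ATTR_PROTO_MAX are emitted only when they differ from the
 * corresponding minimum, so a round trip through parse_nat()
 * reconstructs the same nf_nat_range2.
 */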

int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
                          struct sk_buff *skb)
{
        struct nlattr *start;

        start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
        if (!start)
                return -EMSGSIZE;

        if (ct_info->commit && nla_put_flag(skb, ct_info->force
                                            ? OVS_CT_ATTR_FORCE_COMMIT
                                            : OVS_CT_ATTR_COMMIT))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
            nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
                    &ct_info->mark))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
            labels_nonzero(&ct_info->labels.mask) &&
            nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
                    &ct_info->labels))
                return -EMSGSIZE;
        if (ct_info->helper) {
                if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
                                   ct_info->helper->name))
                        return -EMSGSIZE;
        }
        if (ct_info->have_eventmask &&
            nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
                return -EMSGSIZE;
        if (ct_info->timeout[0]) {
                if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
                        return -EMSGSIZE;
        }

#if IS_ENABLED(CONFIG_NF_NAT)
        if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
                return -EMSGSIZE;
#endif
        nla_nest_end(skb, start);

        return 0;
}

void ovs_ct_free_action(const struct nlattr *a)
{
        struct ovs_conntrack_info *ct_info = nla_data(a);

        __ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
        if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
                if (ct_info->nat)
                        nf_nat_helper_put(ct_info->helper);
#endif
                nf_conntrack_helper_put(ct_info->helper);
        }
        if (ct_info->ct) {
                if (ct_info->timeout[0])
                        nf_ct_destroy_timeout(ct_info->ct);
                nf_ct_tmpl_free(ct_info->ct);
        }
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
        int i, err;

        ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
                                         GFP_KERNEL);
        if (!ovs_net->ct_limit_info)
                return -ENOMEM;

        ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
        ovs_net->ct_limit_info->limits =
                kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
                              GFP_KERNEL);
        if (!ovs_net->ct_limit_info->limits) {
                kfree(ovs_net->ct_limit_info);
                return -ENOMEM;
        }

        for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

        ovs_net->ct_limit_info->data =
                nf_conncount_init(net, NFPROTO_INET, sizeof(u32));

        if (IS_ERR(ovs_net->ct_limit_info->data)) {
                err = PTR_ERR(ovs_net->ct_limit_info->data);
                kfree(ovs_net->ct_limit_info->limits);
                kfree(ovs_net->ct_limit_info);
                pr_err("openvswitch: failed to init nf_conncount %d\n", err);
                return err;
        }
        return 0;
}

static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
        const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
        int i;

        nf_conncount_destroy(net, NFPROTO_INET, info->data);
        for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
                struct hlist_head *head = &info->limits[i];
                struct ovs_ct_limit *ct_limit;

                hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
                                         lockdep_ovsl_is_held())
                        kfree_rcu(ct_limit, rcu);
        }
        kfree(info->limits);
        kfree(info);
}

static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
                             struct ovs_header **ovs_reply_header)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *skb;

        skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        *ovs_reply_header = genlmsg_put(skb, info->snd_portid,
                                        info->snd_seq,
                                        &dp_ct_limit_genl_family, 0, cmd);

        if (!*ovs_reply_header) {
                nlmsg_free(skb);
                return ERR_PTR(-EMSGSIZE);
        }
        (*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

        return skb;
}

static bool check_zone_id(int zone_id, u16 *pzone)
{
        if (zone_id >= 0 && zone_id <= 65535) {
                *pzone = (u16)zone_id;
                return true;
        }
        return false;
}

static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
                                       struct ovs_ct_limit_info *info)
{
        struct ovs_zone_limit *zone_limit;
        int rem;
        u16 zone;

        rem = NLA_ALIGN(nla_len(nla_zone_limit));
        zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

        while (rem >= sizeof(*zone_limit)) {
                if (unlikely(zone_limit->zone_id ==
                                OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
                        ovs_lock();
                        info->default_limit = zone_limit->limit;
                        ovs_unlock();
                } else if (unlikely(!check_zone_id(
                                zone_limit->zone_id, &zone))) {
                        OVS_NLERR(true, "zone id is out of range");
                } else {
                        struct ovs_ct_limit *ct_limit;

                        ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
                        if (!ct_limit)
                                return -ENOMEM;

                        ct_limit->zone = zone;
                        ct_limit->limit = zone_limit->limit;

                        ovs_lock();
                        ct_limit_set(info, ct_limit);
                        ovs_unlock();
                }
                rem -= NLA_ALIGN(sizeof(*zone_limit));
                zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
                                NLA_ALIGN(sizeof(*zone_limit)));
        }

        if (rem)
                OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

        return 0;
}

static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
                                       struct ovs_ct_limit_info *info)
{
        struct ovs_zone_limit *zone_limit;
        int rem;
        u16 zone;

        rem = NLA_ALIGN(nla_len(nla_zone_limit));
        zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

        while (rem >= sizeof(*zone_limit)) {
                if (unlikely(zone_limit->zone_id ==
                                OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
                        ovs_lock();
                        info->default_limit = OVS_CT_LIMIT_DEFAULT;
                        ovs_unlock();
                } else if (unlikely(!check_zone_id(
                                zone_limit->zone_id, &zone))) {
                        OVS_NLERR(true, "zone id is out of range");
                } else {
                        ovs_lock();
                        ct_limit_del(info, zone);
                        ovs_unlock();
                }
                rem -= NLA_ALIGN(sizeof(*zone_limit));
                zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
                                NLA_ALIGN(sizeof(*zone_limit)));
        }

        if (rem)
                OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

        return 0;
}

static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
                                          struct sk_buff *reply)
{
        struct ovs_zone_limit zone_limit = {
                .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
                .limit   = info->default_limit,
        };

        return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int __ovs_ct_limit_get_zone_limit(struct net *net,
                                         struct nf_conncount_data *data,
                                         u16 zone_id, u32 limit,
                                         struct sk_buff *reply)
{
        struct nf_conntrack_zone ct_zone;
        struct ovs_zone_limit zone_limit;
        u32 conncount_key = zone_id;

        zone_limit.zone_id = zone_id;
        zone_limit.limit = limit;
        nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

        zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
                                              &ct_zone);
        return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int ovs_ct_limit_get_zone_limit(struct net *net,
                                       struct nlattr *nla_zone_limit,
                                       struct ovs_ct_limit_info *info,
                                       struct sk_buff *reply)
{
        struct ovs_zone_limit *zone_limit;
        int rem, err = 0;
        u32 limit;
        u16 zone;

        rem = NLA_ALIGN(nla_len(nla_zone_limit));
        zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

        while (rem >= sizeof(*zone_limit)) {
                if (unlikely(zone_limit->zone_id ==
                                OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
                        err = ovs_ct_limit_get_default_limit(info, reply);
                        if (err)
                                return err;
                } else if (unlikely(!check_zone_id(zone_limit->zone_id,
                                                   &zone))) {
                        OVS_NLERR(true, "zone id is out of range");
                } else {
                        rcu_read_lock();
                        limit = ct_limit_get(info, zone);
                        rcu_read_unlock();

                        err = __ovs_ct_limit_get_zone_limit(
                                net, info->data, zone, limit, reply);
                        if (err)
                                return err;
                }
                rem -= NLA_ALIGN(sizeof(*zone_limit));
                zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
                                NLA_ALIGN(sizeof(*zone_limit)));
        }

        if (rem)
                OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

        return 0;
}

static int ovs_ct_limit_get_all_zone_limit(struct net *net,
                                           struct ovs_ct_limit_info *info,
                                           struct sk_buff *reply)
{
        struct ovs_ct_limit *ct_limit;
        struct hlist_head *head;
        int i, err = 0;

        err = ovs_ct_limit_get_default_limit(info, reply);
        if (err)
                return err;

        rcu_read_lock();
        for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
                head = &info->limits[i];
                hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
                        err = __ovs_ct_limit_get_zone_limit(net, info->data,
                                ct_limit->zone, ct_limit->limit, reply);
                        if (err)
                                goto exit_err;
                }
        }

exit_err:
        rcu_read_unlock();
        return err;
}

static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct ovs_header *ovs_reply_header;
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
        int err;

        reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
                                             &ovs_reply_header);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
                err = -EINVAL;
                goto exit_err;
        }

        err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
                                          ct_limit_info);
        if (err)
                goto exit_err;

        static_branch_enable(&ovs_ct_limit_enabled);

        genlmsg_end(reply, ovs_reply_header);
        return genlmsg_reply(reply, info);

exit_err:
        nlmsg_free(reply);
        return err;
}

static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct ovs_header *ovs_reply_header;
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
        int err;

        reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
                                             &ovs_reply_header);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
                err = -EINVAL;
                goto exit_err;
        }

        err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
                                          ct_limit_info);
        if (err)
                goto exit_err;

        genlmsg_end(reply, ovs_reply_header);
        return genlmsg_reply(reply, info);

exit_err:
        nlmsg_free(reply);
        return err;
}

static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct nlattr *nla_reply;
        struct sk_buff *reply;
        struct ovs_header *ovs_reply_header;
        struct net *net = sock_net(skb->sk);
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
        struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
        int err;

        reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
                                             &ovs_reply_header);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
        if (!nla_reply) {
                err = -EMSGSIZE;
                goto exit_err;
        }

        if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
                err = ovs_ct_limit_get_zone_limit(
                        net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
                        reply);
                if (err)
                        goto exit_err;
        } else {
                err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
                                                      reply);
                if (err)
                        goto exit_err;
        }

        nla_nest_end(reply, nla_reply);
        genlmsg_end(reply, ovs_reply_header);
        return genlmsg_reply(reply, info);

exit_err:
        nlmsg_free(reply);
        return err;
}

static const struct genl_small_ops ct_limit_genl_ops[] = {
        { .cmd = OVS_CT_LIMIT_CMD_SET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
                                           * privilege.
                                           */
                .doit = ovs_ct_limit_cmd_set,
        },
        { .cmd = OVS_CT_LIMIT_CMD_DEL,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
                                           * privilege.
                                           */
                .doit = ovs_ct_limit_cmd_del,
        },
        { .cmd = OVS_CT_LIMIT_CMD_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .flags = 0,               /* OK for unprivileged users. */
                .doit = ovs_ct_limit_cmd_get,
        },
};

static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
        .name = OVS_CT_LIMIT_MCGROUP,
};

struct genl_family dp_ct_limit_genl_family __ro_after_init = {
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_CT_LIMIT_FAMILY,
        .version = OVS_CT_LIMIT_VERSION,
        .maxattr = OVS_CT_LIMIT_ATTR_MAX,
        .policy = ct_limit_policy,
        .netnsok = true,
        .parallel_ops = true,
        .small_ops = ct_limit_genl_ops,
        .n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
        .mcgrps = &ovs_ct_limit_multicast_group,
        .n_mcgrps = 1,
        .module = THIS_MODULE,
};
#endif

int ovs_ct_init(struct net *net)
{
        unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        if (nf_connlabels_get(net, n_bits - 1)) {
                ovs_net->xt_label = false;
                OVS_NLERR(true, "Failed to set connlabel length");
        } else {
                ovs_net->xt_label = true;
        }

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
        return ovs_ct_limit_init(net, ovs_net);
#else
        return 0;
#endif
}

void ovs_ct_exit(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
        ovs_ct_limit_exit(net, ovs_net);
#endif

        if (ovs_net->xt_label)
                nf_connlabels_put(net);
}