/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;
        int actions_len;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN      (VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
        unsigned long dst;
        struct vport *vport;
        struct ovs_skb_cb cb;
        __be16 inner_protocol;
        u16 network_offset;     /* valid only for MPLS */
        u16 vlan_tci;
        __be16 vlan_proto;
        unsigned int l2_len;
        u8 mac_proto;
        u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
        struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
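
/* Worked example of how the limits above interact (illustrative comment,
 * derived from the constants in this file): exec_actions_level is 1 for the
 * outermost ovs_execute_actions() call, so recursion levels 1..3
 * (OVS_DEFERRED_ACTION_THRESHOLD) can each borrow the percpu key slot
 * key[level - 1] and run nested actions immediately, while levels 4 and 5
 * must queue their work in the deferred-action FIFO instead; anything past
 * OVS_RECURSION_LIMIT is dropped in ovs_execute_actions().
 */
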
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
        struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
        int level = this_cpu_read(exec_actions_level);
        struct sw_flow_key *key = NULL;

        if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
                key = &keys->key[level - 1];
                *key = *key_;
        }

        return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}
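
/* Note on capacity (illustrative comment): with the bound above, head stops
 * at DEFERRED_ACTION_FIFO_SIZE - 1, so at most nine deferred actions can be
 * queued per packet. The FIFO never wraps; process_deferred_actions() drains
 * it and action_fifo_init() resets head and tail before the next packet.
 */
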
/* Return the deferred-action slot on success, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *actions,
                                                    const int actions_len)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = actions;
                da->actions_len = actions_len;
                da->pkt_key = *key;
        }

        return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key,
                         u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key);

static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
                             __be16 ethertype)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be16 diff[] = { ~(hdr->h_proto), ethertype };

                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
        }

        hdr->h_proto = ethertype;
}
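
/* Why the diff[] trick works (illustrative comment): in the ones'-complement
 * arithmetic used by csum_partial(), folding in ~old_value cancels the old
 * ethertype's contribution to skb->csum and folding in the new value adds
 * its own, so the CHECKSUM_COMPLETE value stays correct without recomputing
 * the checksum over the whole packet.
 */
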
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        struct mpls_shim_hdr *new_mpls_lse;

        /* The networking stack does not allow simultaneous Tunnel and MPLS GSO. */
        if (skb->encapsulation)
                return -ENOTSUPP;

        if (skb_cow_head(skb, MPLS_HLEN) < 0)
                return -ENOMEM;

        if (!skb->inner_protocol) {
                skb_set_inner_network_header(skb, skb->mac_len);
                skb_set_inner_protocol(skb, skb->protocol);
        }

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb->mac_len);

        new_mpls_lse = mpls_hdr(skb);
        new_mpls_lse->label_stack_entry = mpls->mpls_lse;

        skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

        if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
                update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb->mac_len);

        if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
                struct ethhdr *hdr;

                /* mpls_hdr() is used to locate the ethertype field correctly
                 * in the presence of VLAN tags.
                 */
                hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
                update_ethertype(skb, hdr, ethertype);
        }
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
{
        struct mpls_shim_hdr *stack;
        __be32 lse;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        stack = mpls_hdr(skb);
        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(stack->label_stack_entry), lse };

                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
        }

        stack->label_stack_entry = lse;
        flow_key->mpls.top_lse = lse;
        return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = 0;
                key->eth.vlan.tpid = 0;
        }
        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb)) {
                invalidate_flow_key(key);
        } else {
                key->eth.vlan.tci = vlan->vlan_tci;
                key->eth.vlan.tpid = vlan->vlan_tpid;
        }
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
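
/* Note (illustrative comment): VLAN_TAG_PRESENT is a software-only flag kept
 * in the TCI word in this kernel, so it is masked out here to hand
 * skb_vlan_push() the raw 16-bit TCI that actually goes on the wire.
 */
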
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        OVS_SET_MASKED(dst[0], src[0], mask[0]);
        OVS_SET_MASKED(dst[1], src[1], mask[1]);
        OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
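
/* Layout note (illustrative comment): a MAC address is six bytes, so the
 * masked copy above runs as three 16-bit OVS_SET_MASKED() steps; this relies
 * on the addresses being 2-byte aligned, which Ethernet headers guarantee.
 */
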
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
                               mask->eth_src);
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
                               mask->eth_dst);

        skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
        skb_pull_rcsum(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_action_push_eth *ethh)
{
        struct ethhdr *hdr;

        /* Add the new Ethernet header */
        if (skb_cow_head(skb, ETH_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);

        hdr = eth_hdr(skb);
        ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
        ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
        hdr->h_proto = skb->protocol;

        skb_postpush_rcsum(skb, hdr, ETH_HLEN);

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_ETHERNET;
        invalidate_flow_key(key);
        return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct nshhdr *nh)
{
        int err;

        err = nsh_push(skb, nh);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = nsh_pop(skb);
        if (err)
                return err;

        /* safe right before invalidate_flow_key */
        if (skb->protocol == htons(ETH_P_TEB))
                key->mac_proto = MAC_PROTO_ETHERNET;
        else
                key->mac_proto = MAC_PROTO_NONE;
        invalidate_flow_key(key);
        return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))
                return;

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, true);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}
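
/* Design note (illustrative comment): both checksums are patched
 * incrementally (update_ip_l4_checksum() fixes the TCP/UDP pseudo-header
 * sum, csum_replace4() fixes the IPv4 header sum), and the skb hash is
 * cleared because any previously computed flow hash covered the old address.
 */
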
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, true);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, true);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, true);
        }
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
{
        masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
        masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
        masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
        masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
        u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

        ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
                             (__force __wsum)(ipv6_tclass << 12));

        ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
        u32 ofl;

        ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
        fl = OVS_MASKED(ofl, fl, mask);

        /* Bits 21-24 are always unmasked, so this retains their values. */
        nh->flow_lbl[0] = (u8)(fl >> 16);
        nh->flow_lbl[1] = (u8)(fl >> 8);
        nh->flow_lbl[2] = (u8)fl;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
        new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
                             (__force __wsum)(new_ttl << 8));
        nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
                       u8 mask)
{
        new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
{
        struct iphdr *nh;
        __be32 new_addr;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;
                }
        }
        if (mask->ipv4_dst) {
                new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;
                }
        }
        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;
        }
        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
        }

        return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
{
        struct ipv6hdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;
                __be32 masked[4];

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
                }
        }
        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;
                __be32 masked[4];

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,
                                                             NEXTHDR_ROUTING,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);

                        set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
                }
        }
        if (mask->ipv6_tclass) {
                set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
                set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
                set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct nlattr *a)
{
        struct nshhdr *nh;
        size_t length;
        int err;
        u8 flags, ttl;
        int i;
        struct ovs_key_nsh key;
        struct ovs_key_nsh mask;

        err = nsh_key_from_nlattr(a, &key, &mask);
        if (err)
                return err;

        /* Make sure the NSH base header is there */
        if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
                return -ENOMEM;

        nh = nsh_hdr(skb);
        length = nsh_hdr_len(nh);

        /* Make sure the whole NSH header is there */
        err = skb_ensure_writable(skb, skb_network_offset(skb) + length);
        if (unlikely(err))
                return err;

        nh = nsh_hdr(skb);
        skb_postpull_rcsum(skb, nh, length);
        flags = nsh_get_flags(nh);
        flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
        flow_key->nsh.base.flags = flags;
        ttl = nsh_get_ttl(nh);
        ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
        flow_key->nsh.base.ttl = ttl;
        nsh_set_flags_and_ttl(nh, flags, ttl);
        nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
                                  mask.base.path_hdr);
        flow_key->nsh.base.path_hdr = nh->path_hdr;
        switch (nh->mdtype) {
        case NSH_M_TYPE1:
                for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
                        nh->md1.context[i] =
                            OVS_MASKED(nh->md1.context[i], key.context[i],
                                       mask.context[i]);
                }
                memcpy(flow_key->nsh.context, nh->md1.context,
                       sizeof(nh->md1.context));
                break;
        case NSH_M_TYPE2:
                memset(flow_key->nsh.context, 0,
                       sizeof(flow_key->nsh.context));
                break;
        default:
                return -EINVAL;
        }
        skb_postpush_rcsum(skb, nh, length);
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, false);
        *port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
{
        struct udphdr *uh;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        /* Either of the masks is non-zero, so do not bother checking them. */
        src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                }
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;
                }

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;
        } else {
                uh->source = src;
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
        }

        skb_clear_hash(skb);
        return 0;
}
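
/* Checksum note (illustrative comment): on the wire a UDP checksum of zero
 * means "no checksum" for IPv4, so when the incremental update yields zero
 * the code above substitutes CSUM_MANGLED_0 (0xffff), which is an equivalent
 * value in ones'-complement arithmetic.
 */
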
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
{
        struct tcphdr *th;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;
        }
        dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
        }
        skb_clear_hash(skb);

        return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
{
        unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
        __le32 old_correct_csum, new_csum, old_csum;
        int err;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        skb_clear_hash(skb);
        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;

        return 0;
}
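
/* Why the XOR (illustrative comment): sh->checksum is set to
 * old_csum ^ old_correct_csum ^ new_csum, i.e. the freshly computed CRC32c
 * combined with whatever error the packet already carried. A packet that
 * arrived with a bad SCTP checksum therefore still has a bad (not silently
 * repaired) checksum after the ports are rewritten.
 */
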
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;

        if (skb_cow_head(skb, data->l2_len) < 0) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        __skb_dst_copy(skb, data->dst);
        *OVS_CB(skb) = data->cb;
        skb->inner_protocol = data->inner_protocol;
        skb->vlan_tci = data->vlan_tci;
        skb->vlan_proto = data->vlan_proto;

        /* Reconstruct the MAC header. */
        skb_push(skb, data->l2_len);
        memcpy(skb->data, &data->l2_data, data->l2_len);
        skb_postpush_rcsum(skb, skb->data, data->l2_len);
        skb_reset_mac_header(skb);

        if (eth_p_mpls(skb->protocol)) {
                skb->inner_network_header = skb->network_header;
                skb_set_network_header(skb, data->network_offset);
                skb_reset_mac_len(skb);
        }

        ovs_vport_send(vport, skb, data->mac_proto);
        return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
        return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
        .family = AF_UNSPEC,
        .mtu = ovs_dst_get_mtu,
};
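
/* Design note (illustrative comment): ovs_dst_ops backs the stack-allocated
 * fake routes built in ovs_fragment() below. The only callback that matters
 * is .mtu, which reports the egress device's MTU so that ip_do_fragment()
 * and the IPv6 fragmentation hook produce fragments of the right size
 * without a real routing lookup.
 */
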
/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
                         u16 orig_network_offset, u8 mac_proto)
{
        unsigned int hlen = skb_network_offset(skb);
        struct ovs_frag_data *data;

        data = this_cpu_ptr(&ovs_frag_data_storage);
        data->dst = skb->_skb_refdst;
        data->vport = vport;
        data->cb = *OVS_CB(skb);
        data->inner_protocol = skb->inner_protocol;
        data->network_offset = orig_network_offset;
        data->vlan_tci = skb->vlan_tci;
        data->vlan_proto = skb->vlan_proto;
        data->mac_proto = mac_proto;
        data->l2_len = hlen;
        memcpy(&data->l2_data, skb->data, hlen);

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
                         struct sk_buff *skb, u16 mru,
                         struct sw_flow_key *key)
{
        u16 orig_network_offset = 0;

        if (eth_p_mpls(skb->protocol)) {
                orig_network_offset = skb_network_offset(skb);
                skb->network_header = skb->inner_network_header;
        }

        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
                goto err;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                struct rtable ovs_rt = { 0 };
                unsigned long orig_dst;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IPCB(skb)->frag_max_size = mru;

                ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                unsigned long orig_dst;
                struct rt6_info ovs_rt;

                if (!v6ops)
                        goto err;

                prepare_frag(vport, skb, orig_network_offset,
                             ovs_key_mac_proto(key));
                memset(&ovs_rt, 0, sizeof(ovs_rt));
                dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
                         DST_OBSOLETE_NONE, DST_NOCOUNT);
                ovs_rt.dst.dev = vport->dev;

                orig_dst = skb->_skb_refdst;
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;

                v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else {
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(key->eth.type), mru,
                          vport->dev->mtu);
                goto err;
        }

        return;
err:
        kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                      struct sw_flow_key *key)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport)) {
                u16 mru = OVS_CB(skb)->mru;
                u32 cutlen = OVS_CB(skb)->cutlen;

                if (unlikely(cutlen > 0)) {
                        if (skb->len - cutlen > ovs_mac_header_len(key))
                                pskb_trim(skb, skb->len - cutlen);
                        else
                                pskb_trim(skb, ovs_mac_header_len(key));
                }

                if (likely(!mru ||
                           (skb->len <= mru + vport->dev->hard_header_len))) {
                        ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
                } else if (mru <= vport->dev->mtu) {
                        struct net *net = read_pnet(&dp->net);

                        ovs_fragment(net, vport, skb, mru, key);
                } else {
                        kfree_skb(skb);
                }
        } else {
                kfree_skb(skb);
        }
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len,
                            uint32_t cutlen)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.mru = OVS_CB(skb)->mru;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get out tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                err = dev_fill_metadata_dst(vport->dev, skb);
                                if (!err)
                                        upcall.egress_tun_info =
                                                skb_tunnel_info(skb);
                        }
                        break;
                }

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  bool last)
{
        struct nlattr *actions;
        struct nlattr *sample_arg;
        int rem = nla_len(attr);
        const struct sample_arg *arg;
        bool clone_flow_key;

        /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
        sample_arg = nla_data(attr);
        arg = nla_data(sample_arg);
        actions = nla_next(sample_arg, &rem);

        if ((arg->probability != U32_MAX) &&
            (!arg->probability || prandom_u32() > arg->probability)) {
                if (last)
                        consume_skb(skb);
                return 0;
        }

        clone_flow_key = !arg->exec;
        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             clone_flow_key);
}
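
/* Sampling math (illustrative comment): arg->probability is a fixed-point
 * fraction of U32_MAX, so U32_MAX means "sample every packet", 0 means
 * "sample none", and anything in between is compared against one
 * prandom_u32() draw per packet; e.g. U32_MAX / 2 samples roughly half.
 */
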
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
                 struct sw_flow_key *key, const struct nlattr *attr,
                 bool last)
{
        struct nlattr *actions;
        struct nlattr *clone_arg;
        int rem = nla_len(attr);
        bool dont_clone_flow_key;

        /* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
        clone_arg = nla_data(attr);
        dont_clone_flow_key = nla_get_u32(clone_arg);
        actions = nla_next(clone_arg, &rem);

        return clone_execute(dp, skb, key, 0, actions, rem, last,
                             !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
{
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                skb_dst_drop(skb);
                dst_hold((struct dst_entry *)tun->tun_dst);
                skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
                return 0;
        }

        return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
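
/* Layout example (illustrative comment): a masked-set attribute carries the
 * value and the mask back to back in the payload, each of the same size, so
 * typed pointer arithmetic lands on the mask:
 *
 *     const struct ovs_key_ipv4 *value = nla_data(a);
 *     const struct ovs_key_ipv4 *mask = get_mask(a, struct ovs_key_ipv4 *);
 *     // mask == value + 1, i.e. nla_data(a) + sizeof(struct ovs_key_ipv4)
 */
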
static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
{
        int err = 0;

        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                OVS_SET_MASKED(skb->priority, nla_get_u32(a),
                               *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */
                err = -EINVAL;
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));
                break;

        case OVS_KEY_ATTR_NSH:
                err = set_nsh(skb, flow_key, a);
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
                                                                    __be32 *));
                break;

        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
        case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
                err = -EINVAL;
                break;
        }

        return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, bool last)
{
        u32 recirc_id;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        recirc_id = nla_get_u32(a);
        return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT: {
                        int port = nla_get_u32(a);
                        struct sk_buff *clone;

                        /* Every output action needs a separate clone
                         * of 'skb'. In case the output action is the
                         * last action, cloning can be avoided.
                         */
                        if (nla_is_last(a, rem)) {
                                do_output(dp, skb, port, key);
                                /* 'skb' has been used for output.
                                 */
                                return 0;
                        }

                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (clone)
                                do_output(dp, clone, port, key);
                        OVS_CB(skb)->cutlen = 0;
                        break;
                }

                case OVS_ACTION_ATTR_TRUNC: {
                        struct ovs_action_trunc *trunc = nla_data(a);

                        if (skb->len > trunc->max_len)
                                OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
                        break;
                }

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr,
                                         len, OVS_CB(skb)->cutlen);
                        OVS_CB(skb)->cutlen = 0;
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC: {
                        bool last = nla_is_last(a, rem);

                        err = execute_recirc(dp, skb, key, a, last);
                        if (last) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;
                }

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE: {
                        bool last = nla_is_last(a, rem);

                        err = sample(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }

                case OVS_ACTION_ATTR_CT:
                        if (!is_flow_key_valid(key)) {
                                err = ovs_flow_key_update(skb, key);
                                if (err)
                                        return err;
                        }

                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));

                        /* Hide stolen IP fragments from user space. */
                        if (err)
                                return err == -EINPROGRESS ? 0 : err;
                        break;

                case OVS_ACTION_ATTR_CT_CLEAR:
                        err = ovs_ct_clear(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_ETH:
                        err = push_eth(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_ETH:
                        err = pop_eth(skb, key);
                        break;

                case OVS_ACTION_ATTR_PUSH_NSH: {
                        u8 buffer[NSH_HDR_MAX_LEN];
                        struct nshhdr *nh = (struct nshhdr *)buffer;

                        err = nsh_hdr_from_nlattr(nla_data(a), nh,
                                                  NSH_HDR_MAX_LEN);
                        if (unlikely(err))
                                break;
                        err = push_nsh(skb, key, nh);
                        break;
                }

                case OVS_ACTION_ATTR_POP_NSH:
                        err = pop_nsh(skb, key);
                        break;

                case OVS_ACTION_ATTR_METER:
                        if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
                                consume_skb(skb);
                                return 0;
                        }
                        break;

                case OVS_ACTION_ATTR_CLONE: {
                        bool last = nla_is_last(a, rem);

                        err = clone(dp, skb, key, a, last);
                        if (last)
                                return err;

                        break;
                }
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        consume_skb(skb);
        return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_key *key, u32 recirc_id,
                         const struct nlattr *actions, int len,
                         bool last, bool clone_flow_key)
{
        struct deferred_action *da;
        struct sw_flow_key *clone;

        skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                /* Out of memory, skip this action.
                 */
                return 0;
        }

        /* When clone_flow_key is false, the 'key' will not be changed
         * by the actions, so the 'key' can be used directly.
         * Otherwise, try to clone key from the next recursion level of
         * 'flow_keys'. If clone is successful, execute the actions
         * without deferring.
         */
        clone = clone_flow_key ? clone_key(key) : key;
        if (clone) {
                int err = 0;

                if (actions) { /* Sample action */
                        if (clone_flow_key)
                                __this_cpu_inc(exec_actions_level);

                        err = do_execute_actions(dp, skb, clone,
                                                 actions, len);

                        if (clone_flow_key)
                                __this_cpu_dec(exec_actions_level);
                } else { /* Recirc action */
                        clone->recirc_id = recirc_id;
                        ovs_dp_process_packet(skb, clone);
                }
                return err;
        }

        /* Out of 'flow_keys' space. Defer actions */
        da = add_deferred_actions(skb, key, actions, len);
        if (da) {
                if (!actions) { /* Recirc action */
                        key = &da->pkt_key;
                        key->recirc_id = recirc_id;
                }
        } else {
                /* Out of per CPU action FIFO space. Drop the 'skb' and
                 * log an error.
                 */
                kfree_skb(skb);

                if (net_ratelimit()) {
                        if (actions) { /* Sample action */
                                pr_warn("%s: deferred action limit reached, drop sample action\n",
                                        ovs_dp_name(dp));
                        } else { /* Recirc action */
                                pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                        ovs_dp_name(dp));
                        }
                }
        }
        return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;
                int actions_len = da->actions_len;

                if (actions)
                        do_execute_actions(dp, skb, key, actions, actions_len);
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
}
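
/* Ordering note (illustrative comment): deferred entries are executed in
 * FIFO order, and actions they trigger may themselves defer, appending to
 * the same per-CPU queue; the do/while above keeps draining until the queue
 * is empty before the FIFO is reset for the next packet.
 */
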
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int err, level;

        level = __this_cpu_inc_return(exec_actions_level);
        if (unlikely(level > OVS_RECURSION_LIMIT)) {
                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
                                     ovs_dp_name(dp));
                kfree_skb(skb);
                err = -ENETDOWN;
                goto out;
        }

        OVS_CB(skb)->acts_origlen = acts->orig_len;
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (level == 1)
                process_deferred_actions(dp);

out:
        __this_cpu_dec(exec_actions_level);
        return err;
}
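
/* Usage sketch (illustrative comment, simplified from the packet receive
 * path in datapath.c): after a flow table lookup, the datapath hands the
 * matched actions to this function, roughly:
 *
 *     sf_acts = rcu_dereference(flow->sf_acts);
 *     error = ovs_execute_actions(dp, skb, sf_acts, key);
 *
 * Only the outermost call (level == 1) drains the deferred-action FIFO.
 */
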
int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        flow_keys = alloc_percpu(struct action_flow_keys);
        if (!flow_keys) {
                free_percpu(action_fifos);
                return -ENOMEM;
        }

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
        free_percpu(flow_keys);
}