/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}
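
/* Illustrative sketch only (not part of the datapath): at recursion
 * level 1 clone_key() hands out keys->key[0], at level 2 keys->key[1],
 * and at level 3 keys->key[2].  Levels 4 and 5 exceed
 * OVS_DEFERRED_ACTION_THRESHOLD, so the caller must fall back to the
 * deferred-action FIFO instead:
 *
 *	struct sw_flow_key *clone = clone_key(key);
 *
 *	if (!clone)
 *		... defer the work via add_deferred_actions() ...
 */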

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
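
/* Note (illustrative, based on the definitions above): 'head' and 'tail'
 * only ever advance; the FIFO is drained and reset by
 * process_deferred_actions() once per packet, so no wrap-around logic is
 * needed.  With DEFERRED_ACTION_FIFO_SIZE == 10, at most nine entries
 * can be queued before action_fifo_put() returns NULL.
 */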

/* Return the deferred action entry on success, or NULL if the FIFO is
 * full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	hdr->h_proto = ethertype;
}
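
/* Why the 'diff' array works (explanatory note): CHECKSUM_COMPLETE means
 * skb->csum holds a one's-complement sum over the packet.  Folding in
 * ~old_value cancels the old contribution and folding in new_value adds
 * the new one, so summing the two-element array { ~(hdr->h_proto),
 * ethertype } into skb->csum is equivalent to recomputing the checksum
 * after the ethertype is rewritten.
 */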

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
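
/* Buffer layout around push_mpls() (illustrative sketch):
 *
 *	before:  | L2 header | network payload ...
 *	after:   | L2 header | MPLS LSE | network payload ...
 *
 * The L2 header is shifted left by MPLS_HLEN (4) bytes so the new label
 * stack entry sits between it and the original network header, and the
 * network header offset is reset to skb->mac_len accordingly.
 */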

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field
		 * correctly in the presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
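
/* Worked example (illustrative): OVS_SET_MASKED(old, new, mask) computes
 * old = (new & mask) | (old & ~mask), i.e. masked bits come from 'new'
 * and the rest keep their old value.  For one byte:
 *
 *	old = 0xab, new = 0x0c, mask = 0x0f
 *	(0x0c & 0x0f) | (0xab & 0xf0) = 0x0c | 0xa0 = 0xac
 *
 * so only the low nibble selected by the mask is rewritten.
 */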

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh,
			     u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl,
			u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl),
			     (__force __wsum)htonl(fl));
}
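
/* Flow label packing (illustrative sketch): the 20-bit IPv6 flow label is
 * stored in nh->flow_lbl as three bytes, with flow_lbl[0] also carrying
 * part of the traffic class in its upper nibble:
 *
 *	fl = 0x12345  ->  flow_lbl[0] & 0x0f = 0x1,
 *	                  flow_lbl[1] = 0x23, flow_lbl[2] = 0x45
 *
 * which is why the label is reassembled with 16- and 8-bit shifts above.
 */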

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl,
			 u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}

	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
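
/* Note on the CSUM_MANGLED_0 fixup above: in UDP over IPv4 a checksum
 * field of zero means "no checksum computed", so a calculated checksum
 * that happens to come out as 0 must be transmitted as 0xffff
 * (CSUM_MANGLED_0), which is an equivalent value in one's-complement
 * arithmetic.
 */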

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
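
/* Why the XOR above carries errors through (explanatory note): writing
 * E = old_csum ^ old_correct_csum for the received checksum's error
 * pattern, the stored value becomes new_csum ^ E.  A packet that arrived
 * with a correct checksum (E == 0) leaves with the correct new checksum,
 * while a corrupt one stays detectably corrupt instead of being silently
 * "repaired".
 */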

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per larger-than-MTU frame; its inverse,
 * ovs_vport_output(), is called once per fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info =
						skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}
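
/* Probability scaling (illustrative): 'arg->probability' maps the 0..100%
 * sampling rate onto 0..U32_MAX, so a 50% rate is roughly
 * probability == U32_MAX / 2 and the packet is kept only when
 * prandom_u32() <= probability.  U32_MAX means "sample everything" and
 * skips the random draw entirely.
 */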

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
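
/* Layout behind get_mask() (illustrative): a masked-set attribute carries
 * the key and its mask back to back, each of the same size:
 *
 *	nla_data(a):  [ struct ovs_key_ipv4 key | struct ovs_key_ipv4 mask ]
 *
 * so casting nla_data(a) to the key type and adding 1 steps over the key
 * and lands exactly on the mask.
 */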

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'.  If the output action is the last
			 * action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone the key from the next recursion level
	 * of 'flow_keys'. If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}

		return err;
	}

	/* Out of 'flow_keys' space. Defer actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
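
/* End-to-end recursion example (illustrative): a packet whose actions
 * keep recirculating enters at level 1.  Levels 1-3 can clone their flow
 * key from the percpu 'flow_keys' array and recurse directly; at level 4
 * clone_key() fails, so the work is queued through add_deferred_actions()
 * and drained by process_deferred_actions() once the level-1 call
 * unwinds.  Anything past OVS_RECURSION_LIMIT (5) is dropped with the
 * message above.
 */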

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}