// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

static unsigned int vrf_net_id;

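/* Per-VRF private data: the preallocated IPv4/IPv6 dst entries used to
 * steer locally generated traffic back through the VRF device, the IPv6
 * FIB table created for this VRF, and the table id from IFLA_VRF_TABLE.
 */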
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

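/* Locally generated packets diverted through the VRF device traverse the
 * netfilter hooks twice: once with skb->dev set to the VRF device and again
 * on the real output device. Mark packets with no conntrack entry as
 * untracked on the VRF pass and clear that marker before the second pass so
 * connection tracking only sees the packet once.
 */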
static void vrf_nf_set_untracked(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == 0)
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}

static void vrf_nf_reset_ct(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == IP_CT_UNTRACKED)
		nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

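/* IPv6 transmit path for packets queued directly on the VRF device: look up
 * the route with the flow oif pinned to the VRF ifindex, short-circuit
 * locally destined traffic back to the Rx path, otherwise strip the dummy
 * ethernet header and send via the LOCAL_OUT hook and dst_output().
 */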
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

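/* IPv4 counterpart of vrf_process_v6_outbound() */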
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

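/* ndo_start_xmit handler: dispatch by protocol and account tx stats */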
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

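/* Deliver the skb to packet taps bound to the VRF device with a synthesized
 * ethernet header; used on the direct output path where the skb is not
 * re-queued through the VRF device's xmit path.
 */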
static void vrf_finish_direct(struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	vrf_nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	vrf_nf_reset_ct(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip6_local_out(net, sk, skb);
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IPV6);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output6_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	int err;

	err = vrf_output6_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip6_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

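/* Divert IPv6 output to the VRF device. With the default (no) qdisc, or when
 * the skb has already been through XFRM, take the direct path and run the
 * netfilter hooks inline; otherwise attach the VRF dst so the packet is
 * re-queued through the VRF device's xmit path.
 */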
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output = vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;
	int ret = -EINVAL;

	vrf_nf_reset_ct(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}

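/* IPv4 output on the VRF dst; counterpart of vrf_output6() above */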
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip_local_out(net, sk, skb);
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IP);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	int err;

	err = vrf_output_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;

	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

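/* Run the skb through the given netfilter hook with the VRF device as the
 * input device. Returns NULL when the hook does not pass the packet on;
 * freeing in that case is handled by the netfilter core.
 */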
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

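/* Perform an input route lookup in the VRF's FIB table for a packet received
 * on @ifindex and attach the resulting dst to the skb; used for link-local
 * sources and other cases that need a strict lookup.
 */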
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb.
	 * For strict packets with a source LLA, determine the dst using the
	 * original ifindex.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;

		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 * Note: Caller to this function must hold rcu_read_lock() and no refcnt
 * is taken on the dst by this function.
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}

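/* Add or remove the l3mdev FIB rule for @family by building a netlink
 * message in the kernel and feeding it to fib_nl_newrule()/fib_nl_delrule(),
 * the same handlers used for rules added from user space.
 */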
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
	    !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU;
	dev->mtu = dev->max_mtu;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);

MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);