/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

static unsigned int vrf_net_id;

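/* per-VRF state: cached IPv4/IPv6 output and local dsts plus the FIB
 * table id this device is bound to
 */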
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64	tx_pkts, tx_bytes, tx_drps;
	u64	rx_pkts, rx_bytes, rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

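/* ndo_get_stats64 handler: fold the per-cpu dstats counters into the
 * rtnl_link_stats64 structure reported to userspace
 */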
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
	return stats;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

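/* IPv6 transmit path for packets sent through the VRF device: route the
 * packet using the VRF table and either hand it to the real output path
 * or, for locally destined traffic, re-circulate it via the cached local
 * dst
 */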
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}

#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

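/* IPv4 transmit path; mirrors vrf_process_v6_outbound above */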
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

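/* ndo_start_xmit handler: hand the frame to the per-family output path
 * and account it in the per-cpu tx counters
 */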
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_set(skb, dst);

	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev) {
			in6_dev_put(rt6_local->rt6i_idev);
			rt6_local->rt6i_idev = NULL;
		}

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output	= vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}

#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_set(skb, dst);

	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output	= vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	cycle_netdev(port_dev);

	return 0;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

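/* ndo_init handler: allocate the per-cpu stats and the default dsts that
 * point back at this device
 */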
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

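/* run a netfilter hook on behalf of the VRF device; a verdict other than
 * accept means the skb has already been consumed by netfilter
 */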
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}

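/* ingress lookup for strict (link-local/multicast) destinations: do a
 * policy route lookup in the VRF table using the original ingress
 * ifindex and attach the result to the skb
 */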
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr          = iph->daddr,
		.saddr          = iph->saddr,
		.flowlabel      = ip6_flowinfo(iph),
		.flowi6_mark    = skb->mark,
		.flowi6_proto   = iph->nexthdr,
		.flowi6_iif     = ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	skb_push(skb, skb->mac_len);
	dev_queue_xmit_nit(skb, vrf_dev);
	skb_pull(skb, skb->mac_len);

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}

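/* add or remove the l3mdev FIB rule for this VRF by building an
 * in-kernel netlink message and feeding it to fib_nl_{new,del}rule
 */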
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

	return 0;

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->destructor = free_netdev;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}

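/* A VRF device is created from userspace with iproute2, e.g.:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * (device and table names above are only examples.) newlink below
 * validates IFLA_VRF_TABLE and installs the l3mdev FIB rule the first
 * time a VRF is created in a network namespace.
 */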
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC)
		return -EINVAL;

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};

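/* module init: register the netdevice notifier, the per-netns state and
 * the rtnl link ops
 */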
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);