/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
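/* Typical usage from user space (see Documentation/networking/vrf.txt),
 * shown here only as an illustration:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * The table id given at link creation selects the FIB table used for
 * routes and lookups within that VRF domain.
 */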
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */
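/* vrf_add_fib_rules() below installs one l3mdev rule per address family at
 * this preference; the result is roughly what "ip rule add pref 1000 l3mdev"
 * would create by hand.
 */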
static unsigned int vrf_net_id;

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}
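/* Rx/Tx counters are kept per cpu; u64_stats_sync lets vrf_get_stats64()
 * read a consistent 64-bit snapshot on 32-bit hosts without locking the
 * hot path.
 */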
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}
/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}
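/* With IFF_NO_QUEUE the device starts out with the noqueue qdisc, whose
 * ->enqueue hook is NULL, so a NULL enqueue here means no real qdisc has
 * been attached to the VRF device.
 */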
/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_oif = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;
	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path.
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_oif = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path.
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr)
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	return NET_XMIT_DROP;
}
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}
static void vrf_finish_direct(struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	/* reset skb device */
}
#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip6_local_out(net, sk, skb);
}
static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IPV6);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output6_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}
static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	int err;

	err = vrf_output6_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip6_local_out(net, sk, skb);

	return err;
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}
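/* Two output paths: with the default (noqueue) qdisc, or when the skb was
 * already transformed by xfrm, the packet is pushed through the netfilter
 * hooks directly (the *_direct variants above); otherwise the skb's dst is
 * switched to the VRF dst so the packet is re-queued via dev_queue_xmit on
 * the VRF device, picking up its qdisc, taps and netfilter hooks.
 */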
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output = vrf_output6;
	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk, struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) {}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();
	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_set(skb, dst);

	return skb;
}
static int vrf_output_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip_local_out(net, sk, skb);
}
static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IP);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}
static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	int err;

	err = vrf_output_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip_local_out(net, sk, skb);

	return err;
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output = vrf_output;
	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}
/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev);

	return 0;
err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}
static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};
static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}
static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif	= ifindex,
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		return &rt->dst;

	return NULL;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};
static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it)
		err = fib_nl_newrule(skb, nlh, NULL);
	else
		err = fib_nl_delrule(skb, nlh, NULL);
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);
	return -EMSGSIZE;
}
static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
}
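/* Ingress traffic is diverted into the VRF through the l3mdev receive hooks
 * (vrf_l3_rcv above) rather than a netdev rx_handler, so the VRF device
 * itself does not accept an rx_handler (IFF_NO_RX_HANDLER).
 */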
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size	 = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};
static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};
/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}
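/* The per-netns flag above is consumed in vrf_newlink(): the first VRF
 * created in a namespace installs the l3mdev FIB rules once, then clears
 * the flag so later VRFs do not add duplicate rules.
 */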
static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};
static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}
module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);