/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
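
/* Update the per-cpu RX counters of a slave. On success the packet and
 * byte counts (plus the multicast count when @mcast is set) are bumped
 * under the u64_stats sequence lock; on failure only rx_errs is bumped.
 */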
static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
			    unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
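
/* Hash an IPv6/IPv4 address into the port's address hash table; the
 * secret is seeded once via net_get_random_once() above.
 */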
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
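
/* Look up an address in the port-wide hash table (RCU-protected). */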
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
		if (is_v6 && addr->atype == IPVL_IPV6 &&
		    ipv6_addr_equal(&addr->ip6addr, iaddr))
			return addr;
		else if (!is_v6 && addr->atype == IPVL_IPV4 &&
			 addr->ip4addr.s_addr ==
				((struct in_addr *)iaddr)->s_addr)
			return addr;
	}
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}
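
/* Search a slave's own address list rather than the port-wide hash table. */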
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if ((is_v6 && addr->atype == IPVL_IPV6 &&
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
			return addr;
	}
	return NULL;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;

	ASSERT_RTNL();

	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
			return true;
	}
	return false;
}
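
/* Locate and validate the L3 header of a frame and report its type
 * (ARP, IPv4, IPv6 or ICMPv6 neighbour discovery) via @type.
 */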
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			*type = IPVL_ICMPV6;
			lyr3h = ip6h + 1;
		}
		break;
	}
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}
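
/* Work-queue handler: drain the port backlog and deliver the queued
 * multicast/broadcast frames to every slave whose filter matches.
 */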
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool hlocal, dlocal;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		ethh = eth_hdr(skb);
		hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		dlocal = false;
		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (hlocal && (ipvlan->dev == skb->dev)) {
				dlocal = true;
				continue;
			}
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;

			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			if (!nskb)
				goto acct;

			nskb->pkt_type = pkt_type;
			nskb->dev = ipvlan->dev;
			if (hlocal)
				ret = dev_forward_skb(ipvlan->dev, nskb);
			else
				ret = netif_rx(nskb);
acct:
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
		}
		rcu_read_unlock();

		if (dlocal) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			kfree_skb(skb);
		}
	}
}
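
/* Scrub the skb when it crosses a netns boundary and retarget it at @dev. */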
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = dev;
}
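
/* Deliver an skb to the slave that owns the matched address. For
 * slave-to-slave (local) traffic the skb is forwarded directly;
 * otherwise it is handed back to the stack with RX_HANDLER_ANOTHER.
 */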
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}
	ipvlan_skb_crossing_ns(skb, dev);

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
					     ipvlan->phy_dev->dev_addr))
			skb->pkt_type = PACKET_OTHERHOST;

		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}
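
/* Map the L3 header found by ipvlan_get_L3_hdr() to the ipvl_addr owning
 * the destination (or source, when @use_dest is false) address.
 */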
static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
					    void *lyr3h, int addr_type,
					    bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	if (addr_type == IPVL_IPV6) {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
	} else if (addr_type == IPVL_ICMPV6) {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the NeighborSolicitation ICMPv6 packets
		 * are handled to avoid DAD issue.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
	} else if (addr_type == IPVL_IPV4) {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
	} else if (addr_type == IPVL_ARP) {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
	}

	return addr;
}
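
/* Route and transmit an IPv4 packet in the namespace of the main device. */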
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have L2; it needs to be discarded and the packet processed
	 * further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}
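
/* Queue a multicast/broadcast frame on the port backlog and kick the
 * work-queue; frames are dropped once the backlog limit is reached.
 */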
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}

static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
	if (addr)
		return ipvlan_rcv_frame(addr, &skb, true);

out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}
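
/* L2-mode transmit: slave-to-slave and multicast frames are handled
 * locally, everything else goes straight out on the physical device.
 */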
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
						  addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, &skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb);
		return NET_XMIT_SUCCESS;
	}

	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return dev_queue_xmit(skb);
}
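
/* Transmit entry point for the ipvlan device; dispatch on the port mode. */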
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
	case IPVLAN_MODE_L3S:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;
	void *lyr3h;
	int addr_type;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb);
			}
		}
	} else {
		struct ipvl_addr *addr;

		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return ret;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
		if (addr)
			ret = ipvlan_rcv_frame(addr, pskb, false);
	}

	return ret;
}
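
/* rx_handler attached to the physical device; dispatch on the port mode. */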
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ipvl_addr *addr = NULL;
	struct ipvl_port *port;
	void *lyr3h;
	int addr_type;

	if (!dev || !netif_is_ipvlan_port(dev))
		goto out;

	port = ipvlan_port_get_rcu(dev);
	if (!port || port->mode != IPVLAN_MODE_L3S)
		goto out;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
	return addr;
}
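
/* L3S receive hook (l3mdev): re-route the packet as if it had arrived
 * on the slave device that owns the destination address.
 */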
struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
			      u16 proto)
{
	struct ipvl_addr *addr;
	struct net_device *sdev;

	addr = ipvlan_skb_to_addr(skb, dev);
	if (!addr)
		goto out;

	sdev = addr->master->dev;
	switch (proto) {
	case AF_INET:
	{
		int err;
		struct iphdr *ip4h = ip_hdr(skb);

		err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
					   ip4h->tos, sdev);
		if (unlikely(err))
			goto out;
		break;
	}
	case AF_INET6:
	{
		struct dst_entry *dst;
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int flags = RT6_LOOKUP_F_HAS_SADDR;
		struct flowi6 fl6 = {
			.flowi6_iif = sdev->ifindex,
			.daddr = ip6h->daddr,
			.saddr = ip6h->saddr,
			.flowlabel = ip6_flowinfo(ip6h),
			.flowi6_mark = skb->mark,
			.flowi6_proto = ip6h->nexthdr,
		};

		skb_dst_drop(skb);
		dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
		skb_dst_set(skb, dst);
		break;
	}
	default:
		break;
	}

out:
	return skb;
}
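
/* Netfilter input hook used in L3S mode; switch skb->dev to the slave
 * and account the packet against it.
 */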
unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	struct ipvl_addr *addr;
	unsigned int len;

	addr = ipvlan_skb_to_addr(skb, skb->dev);
	if (!addr)
		goto out;

	skb->dev = addr->master->dev;
	len = skb->len + ETH_HLEN;
	ipvlan_count_rx(addr->master, len, true, false);
out:
	return NF_ACCEPT;
}