// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

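/* Seed the secret used by the address/MAC hash functions below; the value
 * is generated once on first use and is stable thereafter.
 */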
void ipvlan_init_secret(void)
{
        net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

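/* Update the slave's per-CPU RX counters: packets/bytes (and the multicast
 * count when @mcast is set) on success, or rx_errs on failure.
 */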
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
                            unsigned int len, bool success, bool mcast)
{
        if (likely(success)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
                u64_stats_update_begin(&pcptr->syncp);
                pcptr->rx_pkts++;
                pcptr->rx_bytes += len;
                if (mcast)
                        pcptr->rx_mcast++;
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
        }
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

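/* Hash an IPv6 (or, below, IPv4) address into an index in the port's
 * address hash table.
 */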
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
        const struct in6_addr *ip6_addr = iaddr;

        return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
        return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
        const struct in_addr *ip4_addr = iaddr;

        return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
               IPVLAN_HASH_MASK;
}

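/* Return true if the raw IPv4/IPv6 address @iaddr matches the one stored in
 * @addr, taking the address family into account.
 */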
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
        if (!is_v6 && addr->atype == IPVL_IPV4) {
                struct in_addr *i4addr = (struct in_addr *)iaddr;

                return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
        } else if (is_v6 && addr->atype == IPVL_IPV6) {
                struct in6_addr *i6addr = (struct in6_addr *)iaddr;

                return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
        }

        return false;
}

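/* Look up an address in the port's hash table; must be called under
 * rcu_read_lock().
 */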
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                               const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;
        u8 hash;

        hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
               ipvlan_get_v4_hash(iaddr);
        hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
                if (addr_equal(is_v6, addr, iaddr))
                        return addr;
        return NULL;
}

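/* Insert an address into the port's hash table unless it is already hashed. */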
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
        struct ipvl_port *port = ipvlan->port;
        u8 hash;

        hash = (addr->atype == IPVL_IPV6) ?
               ipvlan_get_v6_hash(&addr->ip6addr) :
               ipvlan_get_v4_hash(&addr->ip4addr);
        if (hlist_unhashed(&addr->hlnode))
                hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
        hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
                                   const void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr, *ret = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
                if (addr_equal(is_v6, addr, iaddr)) {
                        ret = addr;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

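/* Return true if any slave attached to the port already owns the address. */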
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
        struct ipvl_dev *ipvlan;
        bool ret = false;

        rcu_read_lock();
        list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}

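/* Validate the packet's L3 header (ARP, IPv4, or IPv6) and return a pointer
 * to it, setting *type accordingly; returns NULL for unsupported or
 * malformed packets.
 */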
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
        void *lyr3h = NULL;

        switch (skb->protocol) {
        case htons(ETH_P_ARP): {
                struct arphdr *arph;

                if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
                        return NULL;

                arph = arp_hdr(skb);
                *type = IPVL_ARP;
                lyr3h = arph;
                break;
        }
        case htons(ETH_P_IP): {
                u32 pktlen;
                struct iphdr *ip4h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
                        return NULL;

                ip4h = ip_hdr(skb);
                pktlen = ntohs(ip4h->tot_len);
                if (ip4h->ihl < 5 || ip4h->version != 4)
                        return NULL;
                if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
                        return NULL;

                *type = IPVL_IPV4;
                lyr3h = ip4h;
                break;
        }
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6): {
                struct ipv6hdr *ip6h;

                if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
                        return NULL;

                ip6h = ipv6_hdr(skb);
                if (ip6h->version != 6)
                        return NULL;

                *type = IPVL_IPV6;
                lyr3h = ip6h;
                /* Only Neighbour Solicitation pkts need different treatment */
                if (ipv6_addr_any(&ip6h->saddr) &&
                    ip6h->nexthdr == NEXTHDR_ICMP) {
                        struct icmp6hdr *icmph;

                        if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
                                return NULL;

                        ip6h = ipv6_hdr(skb);
                        icmph = (struct icmp6hdr *)(ip6h + 1);

                        if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                                /* Need to access the ipv6 address in body */
                                if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
                                                + sizeof(struct in6_addr))))
                                        return NULL;

                                ip6h = ipv6_hdr(skb);
                                icmph = (struct icmp6hdr *)(ip6h + 1);
                        }

                        *type = IPVL_ICMPV6;
                        lyr3h = icmph;
                }
                break;
        }
#endif
        default:
                return NULL;
        }

        return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
        u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
                               ipvlan_jhash_secret);

        return hash & IPVLAN_MAC_FILTER_MASK;
}

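/* Work-queue handler: drain the port backlog and deliver a clone of each
 * multicast/broadcast packet to every slave whose MAC filter matches.
 * Packets that originated locally (tx_pkt) are also transmitted on the
 * master device.
 */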
void ipvlan_process_multicast(struct work_struct *work)
{
        struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
        struct ethhdr *ethh;
        struct ipvl_dev *ipvlan;
        struct sk_buff *skb, *nskb;
        struct sk_buff_head list;
        unsigned int len;
        unsigned int mac_hash;
        int ret;
        u8 pkt_type;
        bool tx_pkt;

        __skb_queue_head_init(&list);

        spin_lock_bh(&port->backlog.lock);
        skb_queue_splice_tail_init(&port->backlog, &list);
        spin_unlock_bh(&port->backlog.lock);

        while ((skb = __skb_dequeue(&list)) != NULL) {
                struct net_device *dev = skb->dev;
                bool consumed = false;

                ethh = eth_hdr(skb);
                tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
                mac_hash = ipvlan_mac_hash(ethh->h_dest);

                if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
                        pkt_type = PACKET_BROADCAST;
                else
                        pkt_type = PACKET_MULTICAST;

                rcu_read_lock();
                list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
                        if (tx_pkt && (ipvlan->dev == skb->dev))
                                continue;
                        if (!test_bit(mac_hash, ipvlan->mac_filters))
                                continue;
                        if (!(ipvlan->dev->flags & IFF_UP))
                                continue;
                        ret = NET_RX_DROP;
                        len = skb->len + ETH_HLEN;
                        nskb = skb_clone(skb, GFP_ATOMIC);
                        local_bh_disable();
                        if (nskb) {
                                consumed = true;
                                nskb->pkt_type = pkt_type;
                                nskb->dev = ipvlan->dev;
                                if (tx_pkt)
                                        ret = dev_forward_skb(ipvlan->dev, nskb);
                                else
                                        ret = netif_rx(nskb);
                        }
                        ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
                        local_bh_enable();
                }
                rcu_read_unlock();

                if (tx_pkt) {
                        /* If the packet originated here, send it out. */
                        skb->dev = port->dev;
                        skb->pkt_type = pkt_type;
                        dev_queue_xmit(skb);
                } else {
                        if (consumed)
                                consume_skb(skb);
                        else
                                kfree_skb(skb);
                }
                if (dev)
                        dev_put(dev);
                cond_resched();
        }
}

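/* Scrub the skb (fully when it crosses a netns boundary) and retarget it
 * to @dev if one is given.
 */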
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
        bool xnet = true;

        if (dev)
                xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

        skb_scrub_packet(skb, xnet);
        if (dev)
                skb->dev = dev;
}

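/* Deliver the skb to the slave that owns the matched address. Packets from
 * a local slave are forwarded with dev_forward_skb(); external packets are
 * retargeted to the slave and re-injected via RX_HANDLER_ANOTHER.
 */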
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
                            bool local)
{
        struct ipvl_dev *ipvlan = addr->master;
        struct net_device *dev = ipvlan->dev;
        unsigned int len;
        rx_handler_result_t ret = RX_HANDLER_CONSUMED;
        bool success = false;
        struct sk_buff *skb = *pskb;

        len = skb->len + ETH_HLEN;
        /* Only packets exchanged between two local slaves need the device-up
         * check as well as the skb-share check.
         */
        if (local) {
                if (unlikely(!(dev->flags & IFF_UP))) {
                        kfree_skb(skb);
                        goto out;
                }

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        goto out;

                *pskb = skb;
        }

        if (local) {
                skb->pkt_type = PACKET_HOST;
                if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
                        success = true;
        } else {
                skb->dev = dev;
                ret = RX_HANDLER_ANOTHER;
                success = true;
        }

out:
        ipvlan_count_rx(ipvlan, len, success, false);
        return ret;
}

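/* Parse the L3 header according to @addr_type and look up the destination
 * (or source, when !use_dest) address in the port's hash table.
 */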
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
                                     int addr_type, bool use_dest)
{
        struct ipvl_addr *addr = NULL;

        switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
        case IPVL_IPV6: {
                struct ipv6hdr *ip6h;
                struct in6_addr *i6addr;

                ip6h = (struct ipv6hdr *)lyr3h;
                i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i6addr, true);
                break;
        }
        case IPVL_ICMPV6: {
                struct nd_msg *ndmh;
                struct in6_addr *i6addr;

                /* Make sure that Neighbor Solicitation ICMPv6 packets are
                 * handled to avoid DAD issues.
                 */
                ndmh = (struct nd_msg *)lyr3h;
                if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
                        i6addr = &ndmh->target;
                        addr = ipvlan_ht_addr_lookup(port, i6addr, true);
                }
                break;
        }
#endif
        case IPVL_IPV4: {
                struct iphdr *ip4h;
                __be32 *i4addr;

                ip4h = (struct iphdr *)lyr3h;
                i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
                addr = ipvlan_ht_addr_lookup(port, i4addr, false);
                break;
        }
        case IPVL_ARP: {
                struct arphdr *arph;
                unsigned char *arp_ptr;
                __be32 dip;

                arph = (struct arphdr *)lyr3h;
                arp_ptr = (unsigned char *)(arph + 1);
                if (use_dest)
                        arp_ptr += (2 * port->dev->addr_len) + 4;
                else
                        arp_ptr += port->dev->addr_len;

                memcpy(&dip, arp_ptr, 4);
                addr = ipvlan_ht_addr_lookup(port, &dip, false);
                break;
        }
        }

        return addr;
}

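/* Route an IPv4 packet in the master's namespace and send it out through
 * ip_local_out(); returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */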
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
        const struct iphdr *ip4h = ip_hdr(skb);
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);
        struct rtable *rt;
        int err, ret = NET_XMIT_DROP;
        struct flowi4 fl4 = {
                .flowi4_oif = dev->ifindex,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC,
                .flowi4_mark = skb->mark,
                .daddr = ip4h->daddr,
                .saddr = ip4h->saddr,
        };

        rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto err;

        if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
                ip_rt_put(rt);
                goto err;
        }
        skb_dst_set(skb, &rt->dst);

        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        err = ip_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        goto out;
err:
        DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
out:
        return ret;
}

#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct flowi6 fl6 = {
                .flowi6_oif = dev->ifindex,
                .daddr = ip6h->daddr,
                .saddr = ip6h->saddr,
                .flowi6_flags = FLOWI_FLAG_ANYSRC,
                .flowlabel = ip6_flowinfo(ip6h),
                .flowi6_mark = skb->mark,
                .flowi6_proto = ip6h->nexthdr,
        };
        struct dst_entry *dst;
        int err;

        dst = ip6_route_output(dev_net(dev), NULL, &fl6);
        err = dst->error;
        if (err) {
                dst_release(dst);
                return err;
        }
        skb_dst_set(skb, dst);
        return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int err, ret = NET_XMIT_DROP;

        err = ipvlan_route_v6_outbound(dev, skb);
        if (unlikely(err)) {
                DEV_STATS_INC(dev, tx_errors);
                kfree_skb(skb);
                return err;
        }

        memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

        err = ip6_local_out(dev_net(dev), skb->sk, skb);
        if (unlikely(net_xmit_eval(err)))
                DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
        return NET_XMIT_DROP;
}
#endif

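/* Strip the L2 header (multicast frames are dropped here) and hand the
 * packet to the IPv4 or IPv6 output path in the master's namespace.
 */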
static int ipvlan_process_outbound(struct sk_buff *skb)
{
        int ret = NET_XMIT_DROP;

        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * still carry an L2 header, which needs to be discarded before the
         * packet is processed further in the net-ns of the main device.
         */
        if (skb_mac_header_was_set(skb)) {
                /* In this mode we don't care about
                 * multicast and broadcast traffic */
                struct ethhdr *ethh = eth_hdr(skb);

                if (is_multicast_ether_addr(ethh->h_dest)) {
                        pr_debug_ratelimited(
                                "Dropped {multi|broad}cast of type=[%x]\n",
                                ntohs(skb->protocol));
                        kfree_skb(skb);
                        goto out;
                }

                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
        }

        if (skb->protocol == htons(ETH_P_IPV6))
                ret = ipvlan_process_v6_outbound(skb);
        else if (skb->protocol == htons(ETH_P_IP))
                ret = ipvlan_process_v4_outbound(skb);
        else {
                pr_warn_ratelimited("Dropped outbound packet type=%x\n",
                                    ntohs(skb->protocol));
                kfree_skb(skb);
        }
out:
        return ret;
}
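/* Queue a multicast/broadcast skb on the port backlog for deferred
 * processing by ipvlan_process_multicast(), dropping it if the backlog
 * is full.
 */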
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
                                     struct sk_buff *skb, bool tx_pkt)
{
        if (skb->protocol == htons(ETH_P_PAUSE)) {
                kfree_skb(skb);
                return;
        }

        /* Record whether the deferred packet came from the TX or the RX
         * path; inferring this from the MAC addresses on the packet would
         * lead to erroneous decisions. (This would be the case with
         * loopback mode on the master device or hairpin mode on the switch.)
         */
        IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

        spin_lock(&port->backlog.lock);
        if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
                if (skb->dev)
                        dev_hold(skb->dev);
                __skb_queue_tail(&port->backlog, skb);
                spin_unlock(&port->backlog.lock);
                schedule_work(&port->wq);
        } else {
                spin_unlock(&port->backlog.lock);
                atomic_long_inc(&skb->dev->rx_dropped);
                kfree_skb(skb);
        }
}

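/* L3-mode TX: deliver directly to a local slave that owns the destination
 * address (dropped instead in private mode, skipped entirely in VEPA mode),
 * otherwise route the packet out through the master device.
 */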
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        void *lyr3h;
        struct ipvl_addr *addr;
        int addr_type;

        lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
        if (!lyr3h)
                goto out;

        if (!ipvlan_is_vepa(ipvlan->port)) {
                addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                if (addr) {
                        if (ipvlan_is_private(ipvlan->port)) {
                                consume_skb(skb);
                                return NET_XMIT_DROP;
                        }
                        ipvlan_rcv_frame(addr, &skb, true);
                        return NET_XMIT_SUCCESS;
                }
        }
out:
        ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
        return ipvlan_process_outbound(skb);
}

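/* L2-mode TX: frames whose destination MAC equals the source MAC (the
 * port's MAC) are looped back locally, multicast/broadcast frames are
 * deferred to the backlog, and everything else is transmitted through
 * the master device.
 */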
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ethhdr *eth = skb_eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (!ipvlan_is_vepa(ipvlan->port) &&
            ether_addr_equal(eth->h_dest, eth->h_source)) {
                lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
                if (lyr3h) {
                        addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
                        if (addr) {
                                if (ipvlan_is_private(ipvlan->port)) {
                                        consume_skb(skb);
                                        return NET_XMIT_DROP;
                                }
                                ipvlan_rcv_frame(addr, &skb, true);
                                return NET_XMIT_SUCCESS;
                        }
                }
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        return NET_XMIT_DROP;

                /* The packet definitely does not belong to any of the
                 * virtual devices, but the destination is local. So forward
                 * the skb to the main device. At the RX side the handler
                 * returns RX_HANDLER_PASS so that the packet is processed
                 * further up the stack.
                 */
                dev_forward_skb(ipvlan->phy_dev, skb);
                return NET_XMIT_SUCCESS;

        } else if (is_multicast_ether_addr(eth->h_dest)) {
                skb_reset_mac_header(skb);
                ipvlan_skb_crossing_ns(skb, NULL);
                ipvlan_multicast_enqueue(ipvlan->port, skb, true);
                return NET_XMIT_SUCCESS;
        }

        skb->dev = ipvlan->phy_dev;
        return dev_queue_xmit(skb);
}

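/* TX entry point called from the slave device's ndo_start_xmit(): dispatch
 * the packet according to the port mode (L2, L3, or L3S).
 */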
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

        if (!port)
                goto out;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
                goto out;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_xmit_mode_l2(skb, dev);
        case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
        case IPVLAN_MODE_L3S:
#endif
                return ipvlan_xmit_mode_l3(skb, dev);
        }

        /* Should not reach here */
        WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
                          port->mode);
out:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

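/* Return true if the frame is treated as external; it is considered local
 * only when both its source MAC matches the master device and its source
 * L3 address belongs to one of the port's slaves.
 */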
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
        struct ethhdr *eth = eth_hdr(skb);
        struct ipvl_addr *addr;
        void *lyr3h;
        int addr_type;

        if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
                lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
                if (!lyr3h)
                        return true;

                addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
                if (addr)
                        return false;
        }

        return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        void *lyr3h;
        int addr_type;
        struct ipvl_addr *addr;
        struct sk_buff *skb = *pskb;
        rx_handler_result_t ret = RX_HANDLER_PASS;

        lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
        if (!lyr3h)
                goto out;

        addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
        if (addr)
                ret = ipvlan_rcv_frame(addr, pskb, false);

out:
        return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
                                                 struct ipvl_port *port)
{
        struct sk_buff *skb = *pskb;
        struct ethhdr *eth = eth_hdr(skb);
        rx_handler_result_t ret = RX_HANDLER_PASS;

        if (is_multicast_ether_addr(eth->h_dest)) {
                if (ipvlan_external_frame(skb, port)) {
                        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

                        /* External frames are queued for device local
                         * distribution, but a copy is given to the master
                         * straight away to avoid sending duplicates later
                         * when the work queue processes this frame. This is
                         * achieved by returning RX_HANDLER_PASS.
                         */
                        if (nskb) {
                                ipvlan_skb_crossing_ns(nskb, NULL);
                                ipvlan_multicast_enqueue(port, nskb, false);
                        }
                }
        } else {
                /* Handle non-multicast packets as in L3 mode */
                ret = ipvlan_handle_mode_l3(pskb, port);
        }

        return ret;
}

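/* rx_handler installed on the master device: dispatch incoming frames
 * according to the port mode. In L3S mode the address lookup happens in
 * the l3mdev hooks, so frames are simply passed up the stack here.
 */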
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

        if (!port)
                return RX_HANDLER_PASS;

        switch (port->mode) {
        case IPVLAN_MODE_L2:
                return ipvlan_handle_mode_l2(pskb, port);
        case IPVLAN_MODE_L3:
                return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
        case IPVLAN_MODE_L3S:
                return RX_HANDLER_PASS;
#endif
        }

        /* Should not reach here */
        WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
                          port->mode);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}