1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/skbuff.h>
9 #include <linux/jhash.h>
10 #include <linux/if_tunnel.h>
11 #include <linux/net.h>
12 #include <linux/igmp.h>
13 #include <linux/workqueue.h>
14 #include <net/sch_generic.h>
15 #include <net/net_namespace.h>
18 #include <net/udp_tunnel.h>
22 #include <uapi/linux/amt.h>
23 #include <linux/security.h>
24 #include <net/gro_cells.h>
26 #include <net/if_inet6.h>
27 #include <net/ndisc.h>
28 #include <net/addrconf.h>
29 #include <net/ip6_route.h>
30 #include <net/inet_common.h>
31 #include <net/ip6_checksum.h>
33 static struct workqueue_struct *amt_wq;
35 static HLIST_HEAD(source_gc_list);
36 /* Lock for source_gc_list */
37 static spinlock_t source_gc_lock;
38 static struct delayed_work source_gc_wq;
39 static char *status_str[] = {
41 "AMT_STATUS_SENT_DISCOVERY",
42 "AMT_STATUS_RECEIVED_DISCOVERY",
43 "AMT_STATUS_SENT_ADVERTISEMENT",
44 "AMT_STATUS_RECEIVED_ADVERTISEMENT",
45 "AMT_STATUS_SENT_REQUEST",
46 "AMT_STATUS_RECEIVED_REQUEST",
47 "AMT_STATUS_SENT_QUERY",
48 "AMT_STATUS_RECEIVED_QUERY",
49 "AMT_STATUS_SENT_UPDATE",
50 "AMT_STATUS_RECEIVED_UPDATE",
53 static char *type_str[] = {
54 "", /* Type 0 is not defined */
56 "AMT_MSG_ADVERTISEMENT",
58 "AMT_MSG_MEMBERSHIP_QUERY",
59 "AMT_MSG_MEMBERSHIP_UPDATE",
60 "AMT_MSG_MULTICAST_DATA",
64 static char *action_str[] = {
68 "AMT_ACT_STATUS_FWD_NEW",
69 "AMT_ACT_STATUS_D_FWD_NEW",
70 "AMT_ACT_STATUS_NONE_NEW",
73 static struct igmpv3_grec igmpv3_zero_grec;
75 #if IS_ENABLED(CONFIG_IPV6)
76 #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
77 static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
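/* mld2_all_node above is ff02::1, the link-scope all-nodes address;
 * MLD General Queries are addressed to it (RFC 3810).
 */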
78 static struct mld2_grec mldv2_zero_grec;
81 static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
83 BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
84 sizeof_field(struct sk_buff, cb));
86 return (struct amt_skb_cb *)((void *)skb->cb +
87 sizeof(struct qdisc_skb_cb));
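/* A sketch of the skb->cb[] layout this relies on (field sizes are
 * illustrative, not taken from a particular build):
 *
 *   skb->cb: [ struct qdisc_skb_cb | struct amt_skb_cb | unused ]
 *
 * Placing the AMT private area after qdisc_skb_cb lets qdisc layers
 * and AMT share cb[]; the BUILD_BUG_ON() above proves both fit.
 */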
90 static void __amt_source_gc_work(void)
92 struct amt_source_node *snode;
93 struct hlist_head gc_list;
96 spin_lock_bh(&source_gc_lock);
97 hlist_move_list(&source_gc_list, &gc_list);
98 spin_unlock_bh(&source_gc_lock);
100 hlist_for_each_entry_safe(snode, t, &gc_list, node) {
101 hlist_del_rcu(&snode->node);
102 kfree_rcu(snode, rcu);
106 static void amt_source_gc_work(struct work_struct *work)
108 __amt_source_gc_work();
110 spin_lock_bh(&source_gc_lock);
111 mod_delayed_work(amt_wq, &source_gc_wq,
112 msecs_to_jiffies(AMT_GC_INTERVAL));
113 spin_unlock_bh(&source_gc_lock);
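/* GC pattern used above: amt_destroy_source() only moves dead sources
 * onto source_gc_list under source_gc_lock; this worker detaches the
 * whole list in one locked operation and frees entries via kfree_rcu(),
 * so RCU readers walking the hash buckets never see a half-freed node.
 */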
116 static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
118 return !memcmp(a, b, sizeof(union amt_addr));
121 static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
123 u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
125 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
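/* For illustration: reciprocal_scale(h, n) maps a 32-bit hash onto
 * [0, n) as (u32)(((u64)h * n) >> 32), so the top bits of the jhash
 * pick the bucket without a modulo; e.g. with hash_buckets == 16 and
 * h == 0x80000000 the bucket is 8.
 */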
128 static bool amt_status_filter(struct amt_source_node *snode,
129 enum amt_filter filter)
135 if (snode->status == AMT_SOURCE_STATUS_FWD &&
136 snode->flags == AMT_SOURCE_OLD)
139 case AMT_FILTER_D_FWD:
140 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
141 snode->flags == AMT_SOURCE_OLD)
144 case AMT_FILTER_FWD_NEW:
145 if (snode->status == AMT_SOURCE_STATUS_FWD &&
146 snode->flags == AMT_SOURCE_NEW)
149 case AMT_FILTER_D_FWD_NEW:
150 if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
151 snode->flags == AMT_SOURCE_NEW)
157 case AMT_FILTER_NONE_NEW:
158 if (snode->status == AMT_SOURCE_STATUS_NONE &&
159 snode->flags == AMT_SOURCE_NEW)
162 case AMT_FILTER_BOTH:
163 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
164 snode->status == AMT_SOURCE_STATUS_FWD) &&
165 snode->flags == AMT_SOURCE_OLD)
168 case AMT_FILTER_BOTH_NEW:
169 if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
170 snode->status == AMT_SOURCE_STATUS_FWD) &&
171 snode->flags == AMT_SOURCE_NEW)
182 static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
183 struct amt_group_node *gnode,
184 enum amt_filter filter,
187 u32 hash = amt_source_hash(tunnel, src);
188 struct amt_source_node *snode;
190 hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
191 if (amt_status_filter(snode, filter) &&
192 amt_addr_equal(&snode->source_addr, src))
198 static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
200 u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
202 return reciprocal_scale(hash, tunnel->amt->hash_buckets);
205 static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
206 union amt_addr *group,
207 union amt_addr *host,
210 u32 hash = amt_group_hash(tunnel, group);
211 struct amt_group_node *gnode;
213 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
214 if (amt_addr_equal(&gnode->group_addr, group) &&
215 amt_addr_equal(&gnode->host_addr, host) &&
223 static void amt_destroy_source(struct amt_source_node *snode)
225 struct amt_group_node *gnode = snode->gnode;
226 struct amt_tunnel_list *tunnel;
228 tunnel = gnode->tunnel_list;
231 netdev_dbg(snode->gnode->amt->dev,
232 "Delete source %pI4 from %pI4\n",
233 &snode->source_addr.ip4,
234 &gnode->group_addr.ip4);
235 #if IS_ENABLED(CONFIG_IPV6)
237 netdev_dbg(snode->gnode->amt->dev,
238 "Delete source %pI6 from %pI6\n",
239 &snode->source_addr.ip6,
240 &gnode->group_addr.ip6);
244 cancel_delayed_work(&snode->source_timer);
245 hlist_del_init_rcu(&snode->node);
246 tunnel->nr_sources--;
248 spin_lock_bh(&source_gc_lock);
249 hlist_add_head_rcu(&snode->node, &source_gc_list);
250 spin_unlock_bh(&source_gc_lock);
253 static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
255 struct amt_source_node *snode;
256 struct hlist_node *t;
259 if (cancel_delayed_work(&gnode->group_timer))
261 hlist_del_rcu(&gnode->node);
262 gnode->tunnel_list->nr_groups--;
265 netdev_dbg(amt->dev, "Leave group %pI4\n",
266 &gnode->group_addr.ip4);
267 #if IS_ENABLED(CONFIG_IPV6)
269 netdev_dbg(amt->dev, "Leave group %pI6\n",
270 &gnode->group_addr.ip6);
272 for (i = 0; i < amt->hash_buckets; i++)
273 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
274 amt_destroy_source(snode);
276 /* tunnel->lock was acquired outside of amt_del_group(),
277  * but rcu_read_lock() is also held, so this is safe.
279 kfree_rcu(gnode, rcu);
282 /* If a source timer expires with a router filter-mode for the group of
283 * INCLUDE, the router concludes that traffic from this particular
284 * source is no longer desired on the attached network, and deletes the
285 * associated source record.
287 static void amt_source_work(struct work_struct *work)
289 struct amt_source_node *snode = container_of(to_delayed_work(work),
290 struct amt_source_node,
292 struct amt_group_node *gnode = snode->gnode;
293 struct amt_dev *amt = gnode->amt;
294 struct amt_tunnel_list *tunnel;
296 tunnel = gnode->tunnel_list;
297 spin_lock_bh(&tunnel->lock);
299 if (gnode->filter_mode == MCAST_INCLUDE) {
300 amt_destroy_source(snode);
301 if (!gnode->nr_sources)
302 amt_del_group(amt, gnode);
304 /* When a router filter-mode for a group is EXCLUDE,
305 * source records are only deleted when the group timer expires
307 snode->status = AMT_SOURCE_STATUS_D_FWD;
310 spin_unlock_bh(&tunnel->lock);
313 static void amt_act_src(struct amt_tunnel_list *tunnel,
314 struct amt_group_node *gnode,
315 struct amt_source_node *snode,
318 struct amt_dev *amt = tunnel->amt;
322 mod_delayed_work(amt_wq, &snode->source_timer,
323 msecs_to_jiffies(amt_gmi(amt)));
325 case AMT_ACT_GMI_ZERO:
326 cancel_delayed_work(&snode->source_timer);
329 mod_delayed_work(amt_wq, &snode->source_timer,
330 gnode->group_timer.timer.expires);
332 case AMT_ACT_STATUS_FWD_NEW:
333 snode->status = AMT_SOURCE_STATUS_FWD;
334 snode->flags = AMT_SOURCE_NEW;
336 case AMT_ACT_STATUS_D_FWD_NEW:
337 snode->status = AMT_SOURCE_STATUS_D_FWD;
338 snode->flags = AMT_SOURCE_NEW;
340 case AMT_ACT_STATUS_NONE_NEW:
341 cancel_delayed_work(&snode->source_timer);
342 snode->status = AMT_SOURCE_STATUS_NONE;
343 snode->flags = AMT_SOURCE_NEW;
351 netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
352 &snode->source_addr.ip4,
353 &gnode->group_addr.ip4,
355 #if IS_ENABLED(CONFIG_IPV6)
357 netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
358 &snode->source_addr.ip6,
359 &gnode->group_addr.ip6,
364 static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
367 struct amt_source_node *snode;
369 snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
373 memcpy(&snode->source_addr, src, sizeof(union amt_addr));
374 snode->gnode = gnode;
375 snode->status = AMT_SOURCE_STATUS_NONE;
376 snode->flags = AMT_SOURCE_NEW;
377 INIT_HLIST_NODE(&snode->node);
378 INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
383 /* RFC 3810 - 7.2.2. Definition of Filter Timers
384  *
385  *  Router Mode          Filter Timer         Actions/Comments
386  *  -----------       -----------------       ----------------
387  *
388  *    INCLUDE             Not Used            All listeners in
389  *                                            INCLUDE mode.
390  *
391  *    EXCLUDE             Timer > 0           At least one listener
392  *                                            in EXCLUDE mode.
393  *
394  *    EXCLUDE             Timer == 0          No more listeners in
395  *                                            EXCLUDE mode for the
396  *                                            multicast address.
397  *                                            If the Requested List
398  *                                            is empty, delete
399  *                                            Multicast Address
400  *                                            Record. If not, switch
401  *                                            to INCLUDE filter mode;
402  *                                            the sources in the
403  *                                            Requested List are
404  *                                            moved to the Include
405  *                                            List, and the Exclude
406  *                                            List is deleted.
407  */
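/* Worked example (a sketch): when a group's filter timer expires in
 * EXCLUDE mode with Requested List {S1}, the group switches back to
 * INCLUDE {S1}; with an empty Requested List the whole Multicast
 * Address Record is deleted. amt_group_work() below implements this
 * by destroying expired/D_FWD sources, keeping the rest as FWD, and
 * either deleting the group or setting filter_mode to MCAST_INCLUDE.
 */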
408 static void amt_group_work(struct work_struct *work)
410 struct amt_group_node *gnode = container_of(to_delayed_work(work),
411 struct amt_group_node,
413 struct amt_tunnel_list *tunnel = gnode->tunnel_list;
414 struct amt_dev *amt = gnode->amt;
415 struct amt_source_node *snode;
416 bool delete_group = true;
417 struct hlist_node *t;
420 buckets = amt->hash_buckets;
422 spin_lock_bh(&tunnel->lock);
423 if (gnode->filter_mode == MCAST_INCLUDE) {
425 spin_unlock_bh(&tunnel->lock);
430 for (i = 0; i < buckets; i++) {
431 hlist_for_each_entry_safe(snode, t,
432 &gnode->sources[i], node) {
433 if (!delayed_work_pending(&snode->source_timer) ||
434 snode->status == AMT_SOURCE_STATUS_D_FWD) {
435 amt_destroy_source(snode);
437 delete_group = false;
438 snode->status = AMT_SOURCE_STATUS_FWD;
443 amt_del_group(amt, gnode);
445 gnode->filter_mode = MCAST_INCLUDE;
447 spin_unlock_bh(&tunnel->lock);
452 /* Non-existent group is created as INCLUDE {empty}:
454 * RFC 3376 - 5.1. Action on Change of Interface State
456 * If no interface state existed for that multicast address before
457 * the change (i.e., the change consisted of creating a new
458 * per-interface record), or if no state exists after the change
459 * (i.e., the change consisted of deleting a per-interface record),
460 * then the "non-existent" state is considered to have a filter mode
461  * of INCLUDE and an empty source list.
462  */
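/* Example: the first report for a group finds no state, so
 * amt_add_group() below creates it as INCLUDE {} (filter_mode
 * MCAST_INCLUDE, no sources); an IGMPv2 Report is then applied as
 * IS_EX( {} ), which flips the new group into EXCLUDE mode (see
 * amt_igmpv2_report_handler()).
 */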
463 static struct amt_group_node *amt_add_group(struct amt_dev *amt,
464 struct amt_tunnel_list *tunnel,
465 union amt_addr *group,
466 union amt_addr *host,
469 struct amt_group_node *gnode;
473 if (tunnel->nr_groups >= amt->max_groups)
474 return ERR_PTR(-ENOSPC);
476 gnode = kzalloc(sizeof(*gnode) +
477 (sizeof(struct hlist_head) * amt->hash_buckets),
479 if (unlikely(!gnode))
480 return ERR_PTR(-ENOMEM);
483 gnode->group_addr = *group;
484 gnode->host_addr = *host;
486 gnode->tunnel_list = tunnel;
487 gnode->filter_mode = MCAST_INCLUDE;
488 INIT_HLIST_NODE(&gnode->node);
489 INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
490 for (i = 0; i < amt->hash_buckets; i++)
491 INIT_HLIST_HEAD(&gnode->sources[i]);
493 hash = amt_group_hash(tunnel, group);
494 hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
498 netdev_dbg(amt->dev, "Join group %pI4\n",
499 &gnode->group_addr.ip4);
500 #if IS_ENABLED(CONFIG_IPV6)
502 netdev_dbg(amt->dev, "Join group %pI6\n",
503 &gnode->group_addr.ip6);
509 static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
511 u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
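/* The four bytes above encode the IPv4 Router Alert option (RFC 2113):
 * type IPOPT_RA (148, i.e. copied|class 0|number 20), length 4, and a
 * two-byte zero value meaning "routers shall examine this packet".
 */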
512 int hlen = LL_RESERVED_SPACE(amt->dev);
513 int tlen = amt->dev->needed_tailroom;
514 struct igmpv3_query *ihv3;
515 void *csum_start = NULL;
516 __sum16 *csum = NULL;
523 len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
524 skb = netdev_alloc_skb_ip_align(amt->dev, len);
528 skb_reserve(skb, hlen);
529 skb_push(skb, sizeof(*eth));
530 skb->protocol = htons(ETH_P_IP);
531 skb_reset_mac_header(skb);
532 skb->priority = TC_PRIO_CONTROL;
533 skb_put(skb, sizeof(*iph));
534 skb_put_data(skb, ra, sizeof(ra));
535 skb_put(skb, sizeof(*ihv3));
536 skb_pull(skb, sizeof(*eth));
537 skb_reset_network_header(skb);
541 iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
543 iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
544 iph->frag_off = htons(IP_DF);
547 iph->protocol = IPPROTO_IGMP;
548 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
549 iph->saddr = htonl(INADDR_ANY);
553 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
554 ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
555 eth->h_proto = htons(ETH_P_IP);
557 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
558 skb_reset_transport_header(skb);
559 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
562 ihv3->qqic = amt->qi;
565 ihv3->suppress = false;
566 ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
567 ihv3->csum = 0;
568 csum = &ihv3->csum;
569 csum_start = (void *)ihv3;
570 *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
571 offset = skb_transport_offset(skb);
572 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
573 skb->ip_summed = CHECKSUM_NONE;
575 skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
580 static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
583 if (validate && amt->status >= status)
585 netdev_dbg(amt->dev, "Update GW status %s -> %s",
586 status_str[amt->status], status_str[status]);
587 amt->status = status;
590 static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
591 enum amt_status status,
594 if (validate && tunnel->status >= status)
596 netdev_dbg(tunnel->amt->dev,
597 "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
598 &tunnel->ip4, ntohs(tunnel->source_port),
599 status_str[tunnel->status], status_str[status]);
600 tunnel->status = status;
603 static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
606 spin_lock_bh(&amt->lock);
607 __amt_update_gw_status(amt, status, validate);
608 spin_unlock_bh(&amt->lock);
611 static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
612 enum amt_status status, bool validate)
614 spin_lock_bh(&tunnel->lock);
615 __amt_update_relay_status(tunnel, status, validate);
616 spin_unlock_bh(&tunnel->lock);
619 static void amt_send_discovery(struct amt_dev *amt)
621 struct amt_header_discovery *amtd;
622 int hlen, tlen, offset;
633 sock = rcu_dereference(amt->sock);
637 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
640 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
641 amt->discovery_ip, amt->local_ip,
642 amt->gw_port, amt->relay_port,
644 amt->stream_dev->ifindex);
646 amt->dev->stats.tx_errors++;
650 hlen = LL_RESERVED_SPACE(amt->dev);
651 tlen = amt->dev->needed_tailroom;
652 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
653 skb = netdev_alloc_skb_ip_align(amt->dev, len);
656 amt->dev->stats.tx_errors++;
660 skb->priority = TC_PRIO_CONTROL;
661 skb_dst_set(skb, &rt->dst);
663 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
664 skb_reset_network_header(skb);
666 amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
668 amtd->type = AMT_MSG_DISCOVERY;
670 amtd->nonce = amt->nonce;
671 skb_push(skb, sizeof(*udph));
672 skb_reset_transport_header(skb);
674 udph->source = amt->gw_port;
675 udph->dest = amt->relay_port;
676 udph->len = htons(sizeof(*udph) + sizeof(*amtd));
678 offset = skb_transport_offset(skb);
679 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
680 udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
681 sizeof(*udph) + sizeof(*amtd),
682 IPPROTO_UDP, skb->csum);
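/* For reference: skb_checksum() above sums the UDP header plus AMT
 * payload, and csum_tcpudp_magic() folds in the IPv4 pseudo-header
 * (saddr, daddr, protocol, UDP length) to produce the final UDP
 * checksum.
 */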
684 skb_push(skb, sizeof(*iph));
687 iph->ihl = (sizeof(struct iphdr)) >> 2;
690 iph->ttl = ip4_dst_hoplimit(&rt->dst);
691 iph->daddr = amt->discovery_ip;
692 iph->saddr = amt->local_ip;
693 iph->protocol = IPPROTO_UDP;
694 iph->tot_len = htons(len);
696 skb->ip_summed = CHECKSUM_NONE;
697 ip_select_ident(amt->net, skb, NULL);
699 err = ip_local_out(amt->net, sock->sk, skb);
700 if (unlikely(net_xmit_eval(err)))
701 amt->dev->stats.tx_errors++;
703 spin_lock_bh(&amt->lock);
704 __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
705 spin_unlock_bh(&amt->lock);
710 static void amt_send_request(struct amt_dev *amt, bool v6)
712 struct amt_header_request *amtrh;
713 int hlen, tlen, offset;
724 sock = rcu_dereference(amt->sock);
728 if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
731 rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
732 amt->remote_ip, amt->local_ip,
733 amt->gw_port, amt->relay_port,
735 amt->stream_dev->ifindex);
737 amt->dev->stats.tx_errors++;
741 hlen = LL_RESERVED_SPACE(amt->dev);
742 tlen = amt->dev->needed_tailroom;
743 len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
744 skb = netdev_alloc_skb_ip_align(amt->dev, len);
747 amt->dev->stats.tx_errors++;
751 skb->priority = TC_PRIO_CONTROL;
752 skb_dst_set(skb, &rt->dst);
754 len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
755 skb_reset_network_header(skb);
757 amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
759 amtrh->type = AMT_MSG_REQUEST;
760 amtrh->reserved1 = 0;
762 amtrh->reserved2 = 0;
763 amtrh->nonce = amt->nonce;
764 skb_push(skb, sizeof(*udph));
765 skb_reset_transport_header(skb);
767 udph->source = amt->gw_port;
768 udph->dest = amt->relay_port;
769 udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
771 offset = skb_transport_offset(skb);
772 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
773 udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
774 sizeof(*udph) + sizeof(*amtrh),
775 IPPROTO_UDP, skb->csum);
777 skb_push(skb, sizeof(*iph));
780 iph->ihl = (sizeof(struct iphdr)) >> 2;
783 iph->ttl = ip4_dst_hoplimit(&rt->dst);
784 iph->daddr = amt->remote_ip;
785 iph->saddr = amt->local_ip;
786 iph->protocol = IPPROTO_UDP;
787 iph->tot_len = htons(len);
789 skb->ip_summed = CHECKSUM_NONE;
790 ip_select_ident(amt->net, skb, NULL);
792 err = ip_local_out(amt->net, sock->sk, skb);
793 if (unlikely(net_xmit_eval(err)))
794 amt->dev->stats.tx_errors++;
800 static void amt_send_igmp_gq(struct amt_dev *amt,
801 struct amt_tunnel_list *tunnel)
805 skb = amt_build_igmp_gq(amt);
809 amt_skb_cb(skb)->tunnel = tunnel;
813 #if IS_ENABLED(CONFIG_IPV6)
814 static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
816 u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
817 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
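/* The eight bytes above form the IPv6 Hop-by-Hop Options header:
 * next header ICMPv6, extension length 0 (8 bytes total), an RFC 2711
 * Router Alert TLV (type 5, length 2, value 0 = "contains an MLD
 * message"), and two Pad1 options for 8-byte alignment.
 */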
818 int hlen = LL_RESERVED_SPACE(amt->dev);
819 int tlen = amt->dev->needed_tailroom;
820 struct mld2_query *mld2q;
821 void *csum_start = NULL;
822 struct ipv6hdr *ip6h;
827 len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
828 skb = netdev_alloc_skb_ip_align(amt->dev, len);
832 skb_reserve(skb, hlen);
833 skb_push(skb, sizeof(*eth));
834 skb_reset_mac_header(skb);
836 skb->priority = TC_PRIO_CONTROL;
837 skb->protocol = htons(ETH_P_IPV6);
838 skb_put_zero(skb, sizeof(*ip6h));
839 skb_put_data(skb, ra, sizeof(ra));
840 skb_put_zero(skb, sizeof(*mld2q));
841 skb_pull(skb, sizeof(*eth));
842 skb_reset_network_header(skb);
843 ip6h = ipv6_hdr(skb);
844 ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
845 ip6h->nexthdr = NEXTHDR_HOP;
847 ip6h->daddr = mld2_all_node;
848 ip6_flow_hdr(ip6h, 0, 0);
850 if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
852 amt->dev->stats.tx_errors++;
857 eth->h_proto = htons(ETH_P_IPV6);
858 ether_addr_copy(eth->h_source, amt->dev->dev_addr);
859 ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
861 skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
862 skb_reset_transport_header(skb);
863 mld2q = (struct mld2_query *)icmp6_hdr(skb);
864 mld2q->mld2q_mrc = htons(1);
865 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
866 mld2q->mld2q_code = 0;
867 mld2q->mld2q_cksum = 0;
868 mld2q->mld2q_resv1 = 0;
869 mld2q->mld2q_resv2 = 0;
870 mld2q->mld2q_suppress = 0;
871 mld2q->mld2q_qrv = amt->qrv;
872 mld2q->mld2q_nsrcs = 0;
873 mld2q->mld2q_qqic = amt->qi;
874 csum_start = (void *)mld2q;
875 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
878 csum_partial(csum_start,
881 skb->ip_summed = CHECKSUM_NONE;
882 skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
886 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
890 skb = amt_build_mld_gq(amt);
894 amt_skb_cb(skb)->tunnel = tunnel;
898 static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
903 static void amt_secret_work(struct work_struct *work)
905 struct amt_dev *amt = container_of(to_delayed_work(work),
909 spin_lock_bh(&amt->lock);
910 get_random_bytes(&amt->key, sizeof(siphash_key_t));
911 spin_unlock_bh(&amt->lock);
912 mod_delayed_work(amt_wq, &amt->secret_wq,
913 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
916 static void amt_discovery_work(struct work_struct *work)
918 struct amt_dev *amt = container_of(to_delayed_work(work),
922 spin_lock_bh(&amt->lock);
923 if (amt->status > AMT_STATUS_SENT_DISCOVERY)
925 get_random_bytes(&amt->nonce, sizeof(__be32));
926 spin_unlock_bh(&amt->lock);
928 amt_send_discovery(amt);
929 spin_lock_bh(&amt->lock);
931 mod_delayed_work(amt_wq, &amt->discovery_wq,
932 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
933 spin_unlock_bh(&amt->lock);
936 static void amt_req_work(struct work_struct *work)
938 struct amt_dev *amt = container_of(to_delayed_work(work),
943 spin_lock_bh(&amt->lock);
944 if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
947 if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
948 netdev_dbg(amt->dev, "Gateway is not ready\n");
949 amt->qi = AMT_INIT_REQ_TIMEOUT;
953 __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
957 spin_unlock_bh(&amt->lock);
959 amt_send_request(amt, false);
960 amt_send_request(amt, true);
961 spin_lock_bh(&amt->lock);
962 __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
965 exp = min_t(u32, (1U << amt->req_cnt), AMT_MAX_REQ_TIMEOUT);
966 mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
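/* A sketch of the resulting schedule: with req_cnt incremented on
 * each attempt, Requests are retransmitted after 1s, 2s, 4s, 8s, ...,
 * capped at AMT_MAX_REQ_TIMEOUT seconds, the exponential backoff
 * RFC 7450 suggests for gateway Request retransmission.
 */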
967 spin_unlock_bh(&amt->lock);
970 static bool amt_send_membership_update(struct amt_dev *amt,
974 struct amt_header_membership_update *amtmu;
981 sock = rcu_dereference_bh(amt->sock);
985 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
986 sizeof(*iph) + sizeof(struct udphdr));
990 skb_reset_inner_headers(skb);
991 memset(&fl4, 0, sizeof(struct flowi4));
992 fl4.flowi4_oif = amt->stream_dev->ifindex;
993 fl4.daddr = amt->remote_ip;
994 fl4.saddr = amt->local_ip;
995 fl4.flowi4_tos = AMT_TOS;
996 fl4.flowi4_proto = IPPROTO_UDP;
997 rt = ip_route_output_key(amt->net, &fl4);
999 netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
1003 amtmu = skb_push(skb, sizeof(*amtmu));
1005 amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
1006 amtmu->reserved = 0;
1007 amtmu->nonce = amt->nonce;
1008 amtmu->response_mac = amt->mac;
1011 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1013 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1014 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1018 ip4_dst_hoplimit(&rt->dst),
1024 amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
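/* On the wire, a Membership Update is the gateway's original IGMP/MLD
 * report re-encapsulated (RFC 7450 5.1.5):
 *
 *   outer IPv4 | UDP | AMT Membership Update header | inner IP | IGMP/MLD
 *
 * udp_tunnel_xmit_skb() above builds the outer IP/UDP headers from the
 * route and the gateway/relay ports.
 */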
1028 static void amt_send_multicast_data(struct amt_dev *amt,
1029 const struct sk_buff *oskb,
1030 struct amt_tunnel_list *tunnel,
1033 struct amt_header_mcast_data *amtmd;
1034 struct socket *sock;
1035 struct sk_buff *skb;
1040 sock = rcu_dereference_bh(amt->sock);
1044 skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
1045 sizeof(struct udphdr), 0, GFP_ATOMIC);
1049 skb_reset_inner_headers(skb);
1050 memset(&fl4, 0, sizeof(struct flowi4));
1051 fl4.flowi4_oif = amt->stream_dev->ifindex;
1052 fl4.daddr = tunnel->ip4;
1053 fl4.saddr = amt->local_ip;
1054 fl4.flowi4_proto = IPPROTO_UDP;
1055 rt = ip_route_output_key(amt->net, &fl4);
1057 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1062 amtmd = skb_push(skb, sizeof(*amtmd));
1064 amtmd->reserved = 0;
1065 amtmd->type = AMT_MSG_MULTICAST_DATA;
1068 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1070 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1071 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1075 ip4_dst_hoplimit(&rt->dst),
1078 tunnel->source_port,
1083 static bool amt_send_membership_query(struct amt_dev *amt,
1084 struct sk_buff *skb,
1085 struct amt_tunnel_list *tunnel,
1088 struct amt_header_membership_query *amtmq;
1089 struct socket *sock;
1094 sock = rcu_dereference_bh(amt->sock);
1098 err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
1099 sizeof(struct iphdr) + sizeof(struct udphdr));
1103 skb_reset_inner_headers(skb);
1104 memset(&fl4, 0, sizeof(struct flowi4));
1105 fl4.flowi4_oif = amt->stream_dev->ifindex;
1106 fl4.daddr = tunnel->ip4;
1107 fl4.saddr = amt->local_ip;
1108 fl4.flowi4_tos = AMT_TOS;
1109 fl4.flowi4_proto = IPPROTO_UDP;
1110 rt = ip_route_output_key(amt->net, &fl4);
1112 netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
1116 amtmq = skb_push(skb, sizeof(*amtmq));
1118 amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
1119 amtmq->reserved = 0;
1122 amtmq->nonce = tunnel->nonce;
1123 amtmq->response_mac = tunnel->mac;
1126 skb_set_inner_protocol(skb, htons(ETH_P_IP));
1128 skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
1129 udp_tunnel_xmit_skb(rt, sock->sk, skb,
1133 ip4_dst_hoplimit(&rt->dst),
1136 tunnel->source_port,
1139 amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
1143 static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1145 struct amt_dev *amt = netdev_priv(dev);
1146 struct amt_tunnel_list *tunnel;
1147 struct amt_group_node *gnode;
1148 union amt_addr group = {0,};
1149 #if IS_ENABLED(CONFIG_IPV6)
1150 struct ipv6hdr *ip6h;
1151 struct mld_msg *mld;
1153 bool report = false;
1162 if (iph->version == 4) {
1163 if (!ipv4_is_multicast(iph->daddr))
1166 if (!ip_mc_check_igmp(skb)) {
1169 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1170 case IGMP_HOST_MEMBERSHIP_REPORT:
1173 case IGMP_HOST_MEMBERSHIP_QUERY:
1183 group.ip4 = iph->daddr;
1184 #if IS_ENABLED(CONFIG_IPV6)
1185 } else if (iph->version == 6) {
1186 ip6h = ipv6_hdr(skb);
1187 if (!ipv6_addr_is_multicast(&ip6h->daddr))
1190 if (!ipv6_mc_check_mld(skb)) {
1191 mld = (struct mld_msg *)skb_transport_header(skb);
1192 switch (mld->mld_type) {
1193 case ICMPV6_MGM_REPORT:
1194 case ICMPV6_MLD2_REPORT:
1197 case ICMPV6_MGM_QUERY:
1207 group.ip6 = ip6h->daddr;
1210 dev->stats.tx_errors++;
1214 if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
1217 skb_pull(skb, sizeof(struct ethhdr));
1219 if (amt->mode == AMT_MODE_GATEWAY) {
1220 /* Gateway only passes IGMP/MLD packets */
1223 if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
1225 if (amt_send_membership_update(amt, skb, v6))
1228 } else if (amt->mode == AMT_MODE_RELAY) {
1230 tunnel = amt_skb_cb(skb)->tunnel;
1236 /* Do not forward unexpected query */
1237 if (amt_send_membership_query(amt, skb, tunnel, v6))
1244 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
1245 hash = amt_group_hash(tunnel, &group);
1246 hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
1249 if (gnode->group_addr.ip4 == iph->daddr)
1251 #if IS_ENABLED(CONFIG_IPV6)
1253 if (ipv6_addr_equal(&gnode->group_addr.ip6,
1261 amt_send_multicast_data(amt, skb, tunnel, v6);
1266 return NETDEV_TX_OK;
1270 dev->stats.tx_dropped++;
1271 return NETDEV_TX_OK;
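/* For reference: amt_parse_type() below validates the common AMT
 * header. RFC 7450 5.1 defines message types 1 Relay Discovery,
 * 2 Relay Advertisement, 3 Request, 4 Membership Query, 5 Membership
 * Update, 6 Multicast Data and 7 Teardown; version must be 0, and
 * type 0 or anything >= __AMT_MSG_MAX is rejected.
 */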
1274 static int amt_parse_type(struct sk_buff *skb)
1276 struct amt_header *amth;
1278 if (!pskb_may_pull(skb, sizeof(struct udphdr) +
1279 sizeof(struct amt_header)))
1282 amth = (struct amt_header *)(udp_hdr(skb) + 1);
1284 if (amth->version != 0)
1287 if (amth->type >= __AMT_MSG_MAX || !amth->type)
1292 static void amt_clear_groups(struct amt_tunnel_list *tunnel)
1294 struct amt_dev *amt = tunnel->amt;
1295 struct amt_group_node *gnode;
1296 struct hlist_node *t;
1299 spin_lock_bh(&tunnel->lock);
1301 for (i = 0; i < amt->hash_buckets; i++)
1302 hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
1303 amt_del_group(amt, gnode);
1305 spin_unlock_bh(&tunnel->lock);
1308 static void amt_tunnel_expire(struct work_struct *work)
1310 struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
1311 struct amt_tunnel_list,
1313 struct amt_dev *amt = tunnel->amt;
1315 spin_lock_bh(&amt->lock);
1317 list_del_rcu(&tunnel->list);
1319 amt_clear_groups(tunnel);
1321 spin_unlock_bh(&amt->lock);
1322 kfree_rcu(tunnel, rcu);
1325 static void amt_cleanup_srcs(struct amt_dev *amt,
1326 struct amt_tunnel_list *tunnel,
1327 struct amt_group_node *gnode)
1329 struct amt_source_node *snode;
1330 struct hlist_node *t;
1333 /* Delete old sources */
1334 for (i = 0; i < amt->hash_buckets; i++) {
1335 hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
1336 if (snode->flags == AMT_SOURCE_OLD)
1337 amt_destroy_source(snode);
1341 /* switch from new to old */
1342 for (i = 0; i < amt->hash_buckets; i++) {
1343 hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
1344 snode->flags = AMT_SOURCE_OLD;
1346 netdev_dbg(snode->gnode->amt->dev,
1347 "Add source as OLD %pI4 from %pI4\n",
1348 &snode->source_addr.ip4,
1349 &gnode->group_addr.ip4);
1350 #if IS_ENABLED(CONFIG_IPV6)
1352 netdev_dbg(snode->gnode->amt->dev,
1353 "Add source as OLD %pI6 from %pI6\n",
1354 &snode->source_addr.ip6,
1355 &gnode->group_addr.ip6);
1361 static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
1362 struct amt_group_node *gnode, void *grec,
1365 struct igmpv3_grec *igmp_grec;
1366 struct amt_source_node *snode;
1367 #if IS_ENABLED(CONFIG_IPV6)
1368 struct mld2_grec *mld_grec;
1370 union amt_addr src = {0,};
1376 igmp_grec = (struct igmpv3_grec *)grec;
1377 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1379 #if IS_ENABLED(CONFIG_IPV6)
1380 mld_grec = (struct mld2_grec *)grec;
1381 nsrcs = ntohs(mld_grec->grec_nsrcs);
1386 for (i = 0; i < nsrcs; i++) {
1387 if (tunnel->nr_sources >= amt->max_sources)
1390 src.ip4 = igmp_grec->grec_src[i];
1391 #if IS_ENABLED(CONFIG_IPV6)
1393 memcpy(&src.ip6, &mld_grec->grec_src[i],
1394 sizeof(struct in6_addr));
1396 if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
1399 snode = amt_alloc_snode(gnode, &src);
1401 hash = amt_source_hash(tunnel, &snode->source_addr);
1402 hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
1403 tunnel->nr_sources++;
1404 gnode->nr_sources++;
1407 netdev_dbg(snode->gnode->amt->dev,
1408 "Add source as NEW %pI4 from %pI4\n",
1409 &snode->source_addr.ip4,
1410 &gnode->group_addr.ip4);
1411 #if IS_ENABLED(CONFIG_IPV6)
1413 netdev_dbg(snode->gnode->amt->dev,
1414 "Add source as NEW %pI6 from %pI6\n",
1415 &snode->source_addr.ip6,
1416 &gnode->group_addr.ip6);
1422 /* Router State   Report Rec'd New Router State
1423  * ------------   ------------ ----------------
1424  * EXCLUDE (X,Y)  IS_IN (A)    EXCLUDE (X+A,Y-A)
1425  *
1426  * -----------+-----------+-----------+
1427  *            |    OLD    |    NEW    |
1428  * -----------+-----------+-----------+
1429  *    FWD     |     X     |    X+A    |
1430  * -----------+-----------+-----------+
1431  *    D_FWD   |     Y     |    Y-A    |
1432  * -----------+-----------+-----------+
1433  *    NONE    |           |     A     |
1434  * -----------+-----------+-----------+
1435  *
1436  * a) Received sources are NONE/NEW.
1437  * b) All NONE sources will be deleted by amt_cleanup_srcs().
1438  * c) All OLD sources will be deleted by amt_cleanup_srcs().
1439  * d) After deletion, NEW sources are switched to OLD.
1440  */
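/* Worked example (a sketch): let A be the sources already tracked for
 * the group and B the sources carried in the received record, e.g.
 * A = {s1, s2} and B = {s2, s4}. Then, roughly:
 *
 *   AMT_OPS_UNI      A+B  acts on all of {s1, s2, s4}
 *   AMT_OPS_INT      A*B  acts on {s2}
 *   AMT_OPS_SUB      A-B  acts on {s1}
 *   AMT_OPS_SUB_REV  B-A  acts on {s4}
 *
 * Each caller pairs one of these ops with a status filter and an
 * action to realize a single row of the RFC 3376/3810 state tables.
 */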
1441 static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
1442 struct amt_group_node *gnode,
1445 enum amt_filter filter,
1449 struct amt_dev *amt = tunnel->amt;
1450 struct amt_source_node *snode;
1451 struct igmpv3_grec *igmp_grec;
1452 #if IS_ENABLED(CONFIG_IPV6)
1453 struct mld2_grec *mld_grec;
1455 union amt_addr src = {0,};
1456 struct hlist_node *t;
1461 igmp_grec = (struct igmpv3_grec *)grec;
1462 nsrcs = ntohs(igmp_grec->grec_nsrcs);
1464 #if IS_ENABLED(CONFIG_IPV6)
1465 mld_grec = (struct mld2_grec *)grec;
1466 nsrcs = ntohs(mld_grec->grec_nsrcs);
1472 memset(&src, 0, sizeof(union amt_addr));
1476 for (i = 0; i < nsrcs; i++) {
1478 src.ip4 = igmp_grec->grec_src[i];
1479 #if IS_ENABLED(CONFIG_IPV6)
1481 memcpy(&src.ip6, &mld_grec->grec_src[i],
1482 sizeof(struct in6_addr));
1484 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1487 amt_act_src(tunnel, gnode, snode, act);
1492 for (i = 0; i < amt->hash_buckets; i++) {
1493 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1495 if (amt_status_filter(snode, filter))
1496 amt_act_src(tunnel, gnode, snode, act);
1499 for (i = 0; i < nsrcs; i++) {
1501 src.ip4 = igmp_grec->grec_src[i];
1502 #if IS_ENABLED(CONFIG_IPV6)
1504 memcpy(&src.ip6, &mld_grec->grec_src[i],
1505 sizeof(struct in6_addr));
1507 snode = amt_lookup_src(tunnel, gnode, filter, &src);
1510 amt_act_src(tunnel, gnode, snode, act);
1515 for (i = 0; i < amt->hash_buckets; i++) {
1516 hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
1518 if (!amt_status_filter(snode, filter))
1520 for (j = 0; j < nsrcs; j++) {
1522 src.ip4 = igmp_grec->grec_src[j];
1523 #if IS_ENABLED(CONFIG_IPV6)
1526 &mld_grec->grec_src[j],
1527 sizeof(struct in6_addr));
1529 if (amt_addr_equal(&snode->source_addr,
1533 amt_act_src(tunnel, gnode, snode, act);
1539 case AMT_OPS_SUB_REV:
1541 for (i = 0; i < nsrcs; i++) {
1543 src.ip4 = igmp_grec->grec_src[i];
1544 #if IS_ENABLED(CONFIG_IPV6)
1546 memcpy(&src.ip6, &mld_grec->grec_src[i],
1547 sizeof(struct in6_addr));
1549 snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
1552 snode = amt_lookup_src(tunnel, gnode,
1555 amt_act_src(tunnel, gnode, snode, act);
1560 netdev_dbg(amt->dev, "Invalid type\n");
1565 static void amt_mcast_is_in_handler(struct amt_dev *amt,
1566 struct amt_tunnel_list *tunnel,
1567 struct amt_group_node *gnode,
1568 void *grec, void *zero_grec, bool v6)
1570 if (gnode->filter_mode == MCAST_INCLUDE) {
1571 /* Router State Report Rec'd New Router State Actions
1572 * ------------ ------------ ---------------- -------
1573 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1575 /* Update IS_IN (B) as FWD/NEW */
1576 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1577 AMT_FILTER_NONE_NEW,
1578 AMT_ACT_STATUS_FWD_NEW,
1580 /* Update INCLUDE (A) as NEW */
1581 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1583 AMT_ACT_STATUS_FWD_NEW,
1586 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1592 * ------------ ------------ ---------------- -------
1593 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1595 /* Update (A) in (X, Y) as NONE/NEW */
1596 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1598 AMT_ACT_STATUS_NONE_NEW,
1600 /* Update FWD/OLD as FWD/NEW */
1601 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1603 AMT_ACT_STATUS_FWD_NEW,
1605 /* Update IS_IN (A) as FWD/NEW */
1606 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1607 AMT_FILTER_NONE_NEW,
1608 AMT_ACT_STATUS_FWD_NEW,
1610 /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1611 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1613 AMT_ACT_STATUS_D_FWD_NEW,
1618 static void amt_mcast_is_ex_handler(struct amt_dev *amt,
1619 struct amt_tunnel_list *tunnel,
1620 struct amt_group_node *gnode,
1621 void *grec, void *zero_grec, bool v6)
1623 if (gnode->filter_mode == MCAST_INCLUDE) {
1624 /* Router State Report Rec'd New Router State Actions
1625 * ------------ ------------ ---------------- -------
1626 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1630 /* EXCLUDE(A*B, ) */
1631 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1633 AMT_ACT_STATUS_FWD_NEW,
1635 /* EXCLUDE(, B-A) */
1636 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1638 AMT_ACT_STATUS_D_FWD_NEW,
1641 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1642 AMT_FILTER_D_FWD_NEW,
1645 /* Group Timer=GMI */
1646 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1647 msecs_to_jiffies(amt_gmi(amt))))
1649 gnode->filter_mode = MCAST_EXCLUDE;
1650 /* Deleting (A-B) is handled by amt_cleanup_srcs(). */
1652 /* Router State Report Rec'd New Router State Actions
1653 * ------------ ------------ ---------------- -------
1654 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
1659 /* EXCLUDE (A-Y, ) */
1660 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1662 AMT_ACT_STATUS_FWD_NEW,
1664 /* EXCLUDE (, Y*A ) */
1665 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1667 AMT_ACT_STATUS_D_FWD_NEW,
1670 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1671 AMT_FILTER_BOTH_NEW,
1674 /* Group Timer=GMI */
1675 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1676 msecs_to_jiffies(amt_gmi(amt))))
1678 /* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
1682 static void amt_mcast_to_in_handler(struct amt_dev *amt,
1683 struct amt_tunnel_list *tunnel,
1684 struct amt_group_node *gnode,
1685 void *grec, void *zero_grec, bool v6)
1687 if (gnode->filter_mode == MCAST_INCLUDE) {
1688 /* Router State Report Rec'd New Router State Actions
1689 * ------------ ------------ ---------------- -------
1690 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
1693 /* Update TO_IN (B) sources as FWD/NEW */
1694 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1695 AMT_FILTER_NONE_NEW,
1696 AMT_ACT_STATUS_FWD_NEW,
1698 /* Update INCLUDE (A) sources as NEW */
1699 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1701 AMT_ACT_STATUS_FWD_NEW,
1704 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1709 /* Router State Report Rec'd New Router State Actions
1710 * ------------ ------------ ---------------- -------
1711 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1715 /* Update TO_IN (A) sources as FWD/NEW */
1716 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1717 AMT_FILTER_NONE_NEW,
1718 AMT_ACT_STATUS_FWD_NEW,
1720 /* Update EXCLUDE(X,) sources as FWD/NEW */
1721 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1723 AMT_ACT_STATUS_FWD_NEW,
1726  * (A) sources are already switched to FWD/NEW,
1727  * so D_FWD/OLD -> D_FWD/NEW is okay.
1729 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1731 AMT_ACT_STATUS_D_FWD_NEW,
1734 * Only FWD_NEW will have (A) sources.
1736 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1743 static void amt_mcast_to_ex_handler(struct amt_dev *amt,
1744 struct amt_tunnel_list *tunnel,
1745 struct amt_group_node *gnode,
1746 void *grec, void *zero_grec, bool v6)
1748 if (gnode->filter_mode == MCAST_INCLUDE) {
1749 /* Router State Report Rec'd New Router State Actions
1750 * ------------ ------------ ---------------- -------
1751 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1756 /* EXCLUDE (A*B, ) */
1757 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1759 AMT_ACT_STATUS_FWD_NEW,
1761 /* EXCLUDE (, B-A) */
1762 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1764 AMT_ACT_STATUS_D_FWD_NEW,
1767 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1768 AMT_FILTER_D_FWD_NEW,
1771 /* Group Timer=GMI */
1772 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1773 msecs_to_jiffies(amt_gmi(amt))))
1775 gnode->filter_mode = MCAST_EXCLUDE;
1776 /* Deleting (A-B) is handled by amt_cleanup_srcs(). */
1778 /* Router State Report Rec'd New Router State Actions
1779 * ------------ ------------ ---------------- -------
1780 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
1786 /* Update (A-X-Y) as NONE/OLD */
1787 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1791 /* EXCLUDE (A-Y, ) */
1792 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1794 AMT_ACT_STATUS_FWD_NEW,
1796 /* EXCLUDE (, Y*A) */
1797 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1799 AMT_ACT_STATUS_D_FWD_NEW,
1801 /* Group Timer=GMI */
1802 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1803 msecs_to_jiffies(amt_gmi(amt))))
1805 /* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
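/* Worked example (a sketch): state EXCLUDE (X = {a, b}, Y = {c}) plus
 * a received TO_EX (A = {b, c, d}) yields EXCLUDE (A-Y = {b, d},
 * Y*A = {c}); only the previously unknown source d (A-X-Y) has its
 * timer set to the group timer, matching the RFC 3376 6.4.2 row above.
 */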
1809 static void amt_mcast_allow_handler(struct amt_dev *amt,
1810 struct amt_tunnel_list *tunnel,
1811 struct amt_group_node *gnode,
1812 void *grec, void *zero_grec, bool v6)
1814 if (gnode->filter_mode == MCAST_INCLUDE) {
1815 /* Router State Report Rec'd New Router State Actions
1816 * ------------ ------------ ---------------- -------
1817 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1820 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1822 AMT_ACT_STATUS_FWD_NEW,
1825 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1830 /* Router State Report Rec'd New Router State Actions
1831 * ------------ ------------ ---------------- -------
1832 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
1834 /* EXCLUDE (X+A, ) */
1835 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1837 AMT_ACT_STATUS_FWD_NEW,
1839 /* EXCLUDE (, Y-A) */
1840 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
1842 AMT_ACT_STATUS_D_FWD_NEW,
1845 * All (A) sources are now in FWD/NEW status.
1847 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
1854 static void amt_mcast_block_handler(struct amt_dev *amt,
1855 struct amt_tunnel_list *tunnel,
1856 struct amt_group_node *gnode,
1857 void *grec, void *zero_grec, bool v6)
1859 if (gnode->filter_mode == MCAST_INCLUDE) {
1860 /* Router State Report Rec'd New Router State Actions
1861 * ------------ ------------ ---------------- -------
1862 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
1865 amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
1867 AMT_ACT_STATUS_FWD_NEW,
1870 /* Router State Report Rec'd New Router State Actions
1871 * ------------ ------------ ---------------- -------
1872 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
1875 /* (A-X-Y)=Group Timer */
1876 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1881 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1883 AMT_ACT_STATUS_FWD_NEW,
1885 /* EXCLUDE (X+(A-Y) */
1886 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
1888 AMT_ACT_STATUS_FWD_NEW,
1891 amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
1893 AMT_ACT_STATUS_D_FWD_NEW,
1899 * 7.3.2. In the Presence of Older Version Group Members
1901 * When Group Compatibility Mode is IGMPv2, a router internally
1902 * translates the following IGMPv2 messages for that group to their
1903 * IGMPv3 equivalents:
1905 * IGMPv2 Message IGMPv3 Equivalent
1906 * -------------- -----------------
1907 * Report IS_EX( {} )
1908 * Leave TO_IN( {} )
1910 static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1911 struct amt_tunnel_list *tunnel)
1913 struct igmphdr *ih = igmp_hdr(skb);
1914 struct iphdr *iph = ip_hdr(skb);
1915 struct amt_group_node *gnode;
1916 union amt_addr group, host;
1918 memset(&group, 0, sizeof(union amt_addr));
1919 group.ip4 = ih->group;
1920 memset(&host, 0, sizeof(union amt_addr));
1921 host.ip4 = iph->saddr;
1923 gnode = amt_lookup_group(tunnel, &group, &host, false);
1925 gnode = amt_add_group(amt, tunnel, &group, &host, false);
1926 if (!IS_ERR(gnode)) {
1927 gnode->filter_mode = MCAST_EXCLUDE;
1928 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
1929 msecs_to_jiffies(amt_gmi(amt))))
1936 * 7.3.2. In the Presence of Older Version Group Members
1938 * When Group Compatibility Mode is IGMPv2, a router internally
1939 * translates the following IGMPv2 messages for that group to their
1940 * IGMPv3 equivalents:
1942 * IGMPv2 Message IGMPv3 Equivalent
1943 * -------------- -----------------
1944 * Report IS_EX( {} )
1945 * Leave TO_IN( {} )
1947 static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
1948 struct amt_tunnel_list *tunnel)
1950 struct igmphdr *ih = igmp_hdr(skb);
1951 struct iphdr *iph = ip_hdr(skb);
1952 struct amt_group_node *gnode;
1953 union amt_addr group, host;
1955 memset(&group, 0, sizeof(union amt_addr));
1956 group.ip4 = ih->group;
1957 memset(&host, 0, sizeof(union amt_addr));
1958 host.ip4 = iph->saddr;
1960 gnode = amt_lookup_group(tunnel, &group, &host, false);
1962 amt_del_group(amt, gnode);
1965 static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
1966 struct amt_tunnel_list *tunnel)
1968 struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
1969 int len = skb_transport_offset(skb) + sizeof(*ihrv3);
1970 void *zero_grec = (void *)&igmpv3_zero_grec;
1971 struct iphdr *iph = ip_hdr(skb);
1972 struct amt_group_node *gnode;
1973 union amt_addr group, host;
1974 struct igmpv3_grec *grec;
1978 for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
1979 len += sizeof(*grec);
1980 if (!ip_mc_may_pull(skb, len))
1983 grec = (void *)(skb->data + len - sizeof(*grec));
1984 nsrcs = ntohs(grec->grec_nsrcs);
1986 len += nsrcs * sizeof(__be32);
1987 if (!ip_mc_may_pull(skb, len))
1990 memset(&group, 0, sizeof(union amt_addr));
1991 group.ip4 = grec->grec_mca;
1992 memset(&host, 0, sizeof(union amt_addr));
1993 host.ip4 = iph->saddr;
1994 gnode = amt_lookup_group(tunnel, &group, &host, false);
1996 gnode = amt_add_group(amt, tunnel, &group, &host,
2002 amt_add_srcs(amt, tunnel, gnode, grec, false);
2003 switch (grec->grec_type) {
2004 case IGMPV3_MODE_IS_INCLUDE:
2005 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2008 case IGMPV3_MODE_IS_EXCLUDE:
2009 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2012 case IGMPV3_CHANGE_TO_INCLUDE:
2013 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2016 case IGMPV3_CHANGE_TO_EXCLUDE:
2017 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2020 case IGMPV3_ALLOW_NEW_SOURCES:
2021 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2024 case IGMPV3_BLOCK_OLD_SOURCES:
2025 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2031 amt_cleanup_srcs(amt, tunnel, gnode);
2035 /* Caller must hold tunnel->lock. */
2036 static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2037 struct amt_tunnel_list *tunnel)
2039 struct igmphdr *ih = igmp_hdr(skb);
2042 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2043 amt_igmpv3_report_handler(amt, skb, tunnel);
2045 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2046 amt_igmpv2_report_handler(amt, skb, tunnel);
2048 case IGMP_HOST_LEAVE_MESSAGE:
2049 amt_igmpv2_leave_handler(amt, skb, tunnel);
2056 #if IS_ENABLED(CONFIG_IPV6)
2058 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2060 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2061 * using the MLDv2 protocol for that multicast address. When Multicast
2062 * Address Compatibility Mode is MLDv1, a router internally translates
2063 * the following MLDv1 messages for that multicast address to their
2064 * MLDv2 equivalents:
2066 * MLDv1 Message MLDv2 Equivalent
2067 * -------------- -----------------
2068 * Report IS_EX( {} )
2069 * Done TO_IN( {} )
2071 static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2072 struct amt_tunnel_list *tunnel)
2074 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2075 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2076 struct amt_group_node *gnode;
2077 union amt_addr group, host;
2079 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2080 memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2082 gnode = amt_lookup_group(tunnel, &group, &host, true);
2084 gnode = amt_add_group(amt, tunnel, &group, &host, true);
2085 if (!IS_ERR(gnode)) {
2086 gnode->filter_mode = MCAST_EXCLUDE;
2087 if (!mod_delayed_work(amt_wq, &gnode->group_timer,
2088 msecs_to_jiffies(amt_gmi(amt))))
2095 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2097 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2098 * using the MLDv2 protocol for that multicast address. When Multicast
2099 * Address Compatibility Mode is MLDv1, a router internally translates
2100 * the following MLDv1 messages for that multicast address to their
2101 * MLDv2 equivalents:
2103 * MLDv1 Message MLDv2 Equivalent
2104 * -------------- -----------------
2105 * Report IS_EX( {} )
2106 * Done TO_IN( {} )
2108 static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
2109 struct amt_tunnel_list *tunnel)
2111 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2112 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2113 struct amt_group_node *gnode;
2114 union amt_addr group, host;
2116 memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
2117 memset(&host, 0, sizeof(union amt_addr));
2118 memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
2120 gnode = amt_lookup_group(tunnel, &group, &host, true);
2122 amt_del_group(amt, gnode);
2127 static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2128 struct amt_tunnel_list *tunnel)
2130 struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
2131 int len = skb_transport_offset(skb) + sizeof(*mld2r);
2132 void *zero_grec = (void *)&mldv2_zero_grec;
2133 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2134 struct amt_group_node *gnode;
2135 union amt_addr group, host;
2136 struct mld2_grec *grec;
2140 for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
2141 len += sizeof(*grec);
2142 if (!ipv6_mc_may_pull(skb, len))
2145 grec = (void *)(skb->data + len - sizeof(*grec));
2146 nsrcs = ntohs(grec->grec_nsrcs);
2148 len += nsrcs * sizeof(struct in6_addr);
2149 if (!ipv6_mc_may_pull(skb, len))
2152 memset(&group, 0, sizeof(union amt_addr));
2153 group.ip6 = grec->grec_mca;
2154 memset(&host, 0, sizeof(union amt_addr));
2155 host.ip6 = ip6h->saddr;
2156 gnode = amt_lookup_group(tunnel, &group, &host, true);
2158 gnode = amt_add_group(amt, tunnel, &group, &host,
2164 amt_add_srcs(amt, tunnel, gnode, grec, true);
2165 switch (grec->grec_type) {
2166 case MLD2_MODE_IS_INCLUDE:
2167 amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
2170 case MLD2_MODE_IS_EXCLUDE:
2171 amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
2174 case MLD2_CHANGE_TO_INCLUDE:
2175 amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
2178 case MLD2_CHANGE_TO_EXCLUDE:
2179 amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
2182 case MLD2_ALLOW_NEW_SOURCES:
2183 amt_mcast_allow_handler(amt, tunnel, gnode, grec,
2186 case MLD2_BLOCK_OLD_SOURCES:
2187 amt_mcast_block_handler(amt, tunnel, gnode, grec,
2193 amt_cleanup_srcs(amt, tunnel, gnode);
2197 /* Caller must hold tunnel->lock. */
2198 static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
2199 struct amt_tunnel_list *tunnel)
2201 struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
2203 switch (mld->mld_type) {
2204 case ICMPV6_MGM_REPORT:
2205 amt_mldv1_report_handler(amt, skb, tunnel);
2207 case ICMPV6_MLD2_REPORT:
2208 amt_mldv2_report_handler(amt, skb, tunnel);
2210 case ICMPV6_MGM_REDUCTION:
2211 amt_mldv1_leave_handler(amt, skb, tunnel);
2219 static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
2221 struct amt_header_advertisement *amta;
2224 hdr_size = sizeof(*amta) + sizeof(struct udphdr);
2225 if (!pskb_may_pull(skb, hdr_size))
2228 amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
2232 if (amta->reserved || amta->version)
2235 if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
2236 ipv4_is_zeronet(amta->ip4))
2239 amt->remote_ip = amta->ip4;
2240 netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
2241 mod_delayed_work(amt_wq, &amt->req_wq, 0);
2243 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
2247 static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
2249 struct amt_header_mcast_data *amtmd;
2250 int hdr_size, len, err;
2254 hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
2255 if (!pskb_may_pull(skb, hdr_size))
2258 amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
2259 if (amtmd->reserved || amtmd->version)
2262 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
2265 skb_reset_network_header(skb);
2266 skb_push(skb, sizeof(*eth));
2267 skb_reset_mac_header(skb);
2268 skb_pull(skb, sizeof(*eth));
2271 if (!pskb_may_pull(skb, sizeof(*iph)))
2275 if (iph->version == 4) {
2276 if (!ipv4_is_multicast(iph->daddr))
2278 skb->protocol = htons(ETH_P_IP);
2279 eth->h_proto = htons(ETH_P_IP);
2280 ip_eth_mc_map(iph->daddr, eth->h_dest);
2281 #if IS_ENABLED(CONFIG_IPV6)
2282 } else if (iph->version == 6) {
2283 struct ipv6hdr *ip6h;
2285 if (!pskb_may_pull(skb, sizeof(*ip6h)))
2288 ip6h = ipv6_hdr(skb);
2289 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2291 skb->protocol = htons(ETH_P_IPV6);
2292 eth->h_proto = htons(ETH_P_IPV6);
2293 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2299 skb->pkt_type = PACKET_MULTICAST;
2300 skb->ip_summed = CHECKSUM_NONE;
2302 err = gro_cells_receive(&amt->gro_cells, skb);
2303 if (likely(err == NET_RX_SUCCESS))
2304 dev_sw_netstats_rx_add(amt->dev, len);
2306 amt->dev->stats.rx_dropped++;
2311 static bool amt_membership_query_handler(struct amt_dev *amt,
2312 struct sk_buff *skb)
2314 struct amt_header_membership_query *amtmq;
2315 struct igmpv3_query *ihv3;
2316 struct ethhdr *eth, *oeth;
2320 hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
2321 if (!pskb_may_pull(skb, hdr_size))
2324 amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
2325 if (amtmq->reserved || amtmq->version)
2328 hdr_size -= sizeof(*eth);
2329 if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
2332 oeth = eth_hdr(skb);
2333 skb_reset_mac_header(skb);
2334 skb_pull(skb, sizeof(*eth));
2335 skb_reset_network_header(skb);
2337 if (!pskb_may_pull(skb, sizeof(*iph)))
2341 if (iph->version == 4) {
2342 if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
2346 if (!ipv4_is_multicast(iph->daddr))
2349 ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2350 skb_reset_transport_header(skb);
2351 skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
2352 spin_lock_bh(&amt->lock);
2354 amt->mac = amtmq->response_mac;
2356 amt->qi = ihv3->qqic;
2357 spin_unlock_bh(&amt->lock);
2358 skb->protocol = htons(ETH_P_IP);
2359 eth->h_proto = htons(ETH_P_IP);
2360 ip_eth_mc_map(iph->daddr, eth->h_dest);
2361 #if IS_ENABLED(CONFIG_IPV6)
2362 } else if (iph->version == 6) {
2363 struct mld2_query *mld2q;
2364 struct ipv6hdr *ip6h;
2366 if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
2370 ip6h = ipv6_hdr(skb);
2371 if (!ipv6_addr_is_multicast(&ip6h->daddr))
2374 mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2375 skb_reset_transport_header(skb);
2376 skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
2377 spin_lock_bh(&amt->lock);
2379 amt->mac = amtmq->response_mac;
2381 amt->qi = mld2q->mld2q_qqic;
2382 spin_unlock_bh(&amt->lock);
2383 skb->protocol = htons(ETH_P_IPV6);
2384 eth->h_proto = htons(ETH_P_IPV6);
2385 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2391 ether_addr_copy(eth->h_source, oeth->h_source);
2392 skb->pkt_type = PACKET_MULTICAST;
2393 skb->ip_summed = CHECKSUM_NONE;
2395 if (__netif_rx(skb) == NET_RX_SUCCESS) {
2396 amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
2397 dev_sw_netstats_rx_add(amt->dev, len);
2399 amt->dev->stats.rx_dropped++;
2405 static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
2407 struct amt_header_membership_update *amtmu;
2408 struct amt_tunnel_list *tunnel;
2415 hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
2416 if (!pskb_may_pull(skb, hdr_size))
2419 amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
2420 if (amtmu->reserved || amtmu->version)
2423 if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
2426 skb_reset_network_header(skb);
2428 list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
2429 if (tunnel->ip4 == iph->saddr) {
2430 if ((amtmu->nonce == tunnel->nonce &&
2431 amtmu->response_mac == tunnel->mac)) {
2432 mod_delayed_work(amt_wq, &tunnel->gc_wq,
2433 msecs_to_jiffies(amt_gmi(amt))
2437 netdev_dbg(amt->dev, "Invalid MAC\n");
2446 if (!pskb_may_pull(skb, sizeof(*iph)))
2450 if (iph->version == 4) {
2451 if (ip_mc_check_igmp(skb)) {
2452 netdev_dbg(amt->dev, "Invalid IGMP\n");
2456 spin_lock_bh(&tunnel->lock);
2457 amt_igmp_report_handler(amt, skb, tunnel);
2458 spin_unlock_bh(&tunnel->lock);
2460 skb_push(skb, sizeof(struct ethhdr));
2461 skb_reset_mac_header(skb);
2463 skb->protocol = htons(ETH_P_IP);
2464 eth->h_proto = htons(ETH_P_IP);
2465 ip_eth_mc_map(iph->daddr, eth->h_dest);
2466 #if IS_ENABLED(CONFIG_IPV6)
2467 } else if (iph->version == 6) {
2468 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2470 if (ipv6_mc_check_mld(skb)) {
2471 netdev_dbg(amt->dev, "Invalid MLD\n");
2475 spin_lock_bh(&tunnel->lock);
2476 amt_mld_report_handler(amt, skb, tunnel);
2477 spin_unlock_bh(&tunnel->lock);
2479 skb_push(skb, sizeof(struct ethhdr));
2480 skb_reset_mac_header(skb);
2482 skb->protocol = htons(ETH_P_IPV6);
2483 eth->h_proto = htons(ETH_P_IPV6);
2484 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
2487 netdev_dbg(amt->dev, "Unsupported Protocol\n");
2491 skb_pull(skb, sizeof(struct ethhdr));
2492 skb->pkt_type = PACKET_MULTICAST;
2493 skb->ip_summed = CHECKSUM_NONE;
2495 if (__netif_rx(skb) == NET_RX_SUCCESS) {
2496 amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
2498 dev_sw_netstats_rx_add(amt->dev, len);
2500 amt->dev->stats.rx_dropped++;
static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
				   __be32 daddr, __be16 dport)
{
	struct amt_header_advertisement *amta;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   daddr, amt->local_ip,
				   dport, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amta->version = 0;
	amta->type = AMT_MSG_ADVERTISEMENT;
	amta->reserved = 0;
	amta->nonce = nonce;
	amta->ip4 = amt->local_ip;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = amt->relay_port;
	udph->dest = dport;
	udph->len = htons(sizeof(*amta) + sizeof(*udph));
	udph->check = 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
					sizeof(*udph) + sizeof(*amta),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)) >> 2;
	iph->tos = AMT_TOS;
	iph->frag_off = 0;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->daddr = daddr;
	iph->saddr = amt->local_ip;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);

	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

out:
	rcu_read_unlock();
}
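
/* Relay-side handler for a Relay Discovery message: after basic
 * sanity checks, answer with an Advertisement addressed back to the
 * source address and source port of the discovery packet.
 */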
static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_discovery *amtd;
	struct udphdr *udph;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
	if (amtd->reserved || amtd->version)
		return true;

	amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
	return false;
}
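
/* Relay-side handler for a Request message.  A new tunnel entry is
 * allocated on first contact (bounded by max_tunnels and signalled
 * with an ICMP host-unreachable when exhausted), and a response MAC
 * is derived with siphash over the gateway's address, source port
 * and nonce so the following Membership Update can be validated.
 * Finally an IGMP or MLD general query is sent, as selected by the
 * request's P flag.
 */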
static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_request *amtrh;
	struct amt_tunnel_list *tunnel;
	unsigned long long key;
	struct udphdr *udph;
	struct iphdr *iph;
	u64 mac;
	int i;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
	if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
		return true;

	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
		if (tunnel->ip4 == iph->saddr)
			goto send;

	if (amt->nr_tunnels >= amt->max_tunnels) {
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
		return true;
	}

	tunnel = kzalloc(sizeof(*tunnel) +
			 (sizeof(struct hlist_head) * amt->hash_buckets),
			 GFP_ATOMIC);
	if (!tunnel)
		return true;

	tunnel->source_port = udph->source;
	tunnel->ip4 = iph->saddr;

	memcpy(&key, &tunnel->key, sizeof(unsigned long long));
	tunnel->amt = amt;
	spin_lock_init(&tunnel->lock);
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&tunnel->groups[i]);

	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);

	spin_lock_bh(&amt->lock);
	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
	tunnel->key = amt->key;
	amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
	amt->nr_tunnels++;
	mod_delayed_work(amt_wq, &tunnel->gc_wq,
			 msecs_to_jiffies(amt_gmi(amt)));
	spin_unlock_bh(&amt->lock);

send:
	tunnel->nonce = amtrh->nonce;
	mac = siphash_3u32((__force u32)tunnel->ip4,
			   (__force u32)tunnel->source_port,
			   (__force u32)tunnel->nonce,
			   &tunnel->key);
	tunnel->mac = mac >> 16;

	if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
		return true;

	if (!amtrh->p)
		amt_send_igmp_gq(amt, tunnel);
	else
		amt_send_mld_gq(amt, tunnel);

	return false;
}
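
/* UDP tunnel encap_rcv callback.  Dispatches on the AMT message type:
 * a gateway accepts Advertisement, Membership Query and Multicast
 * Data (the latter two only from the configured relay), while a relay
 * accepts Discovery, Request and Membership Update.  Handlers return
 * true on error; the skb is freed here unless a handler has already
 * passed it up the stack (the data and query paths).
 */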
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	struct iphdr *iph;
	int type;
	bool err;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt) {
		err = true;
		kfree_skb(skb);
		goto out;
	}

	skb->dev = amt->dev;
	iph = ip_hdr(skb);
	type = amt_parse_type(skb);
	if (type == -1) {
		err = true;
		goto drop;
	}

	if (amt->mode == AMT_MODE_GATEWAY) {
		switch (type) {
		case AMT_MSG_ADVERTISEMENT:
			if (iph->saddr != amt->discovery_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			err = amt_advertisement_handler(amt, skb);
			break;
		case AMT_MSG_MULTICAST_DATA:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			err = amt_multicast_data_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		case AMT_MSG_MEMBERSHIP_QUERY:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			err = amt_membership_query_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
			break;
		}
	} else {
		switch (type) {
		case AMT_MSG_DISCOVERY:
			err = amt_discovery_handler(amt, skb);
			break;
		case AMT_MSG_REQUEST:
			err = amt_request_handler(amt, skb);
			break;
		case AMT_MSG_MEMBERSHIP_UPDATE:
			err = amt_update_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of relay\n");
			break;
		}
	}
drop:
	if (err) {
		amt->dev->stats.rx_dropped++;
		kfree_skb(skb);
	} else {
		consume_skb(skb);
	}
out:
	rcu_read_unlock_bh();
	return 0;
}
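
/* encap_err_lookup callback, invoked when an ICMP error arrives for
 * the tunnel's UDP socket.  Only meaningful in gateway mode: an
 * unreachable relay reschedules the request work immediately so that
 * discovery and tunnel establishment are retried.
 */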
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	int type;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt)
		goto out;

	if (amt->mode != AMT_MODE_GATEWAY)
		goto drop;

	type = amt_parse_type(skb);
	if (type == -1)
		goto drop;

	netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
		   type_str[type]);
	switch (type) {
	case AMT_MSG_DISCOVERY:
		break;
	case AMT_MSG_REQUEST:
	case AMT_MSG_MEMBERSHIP_UPDATE:
		if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
			mod_delayed_work(amt_wq, &amt->req_wq, 0);
		break;
	default:
		goto drop;
	}
out:
	rcu_read_unlock_bh();
	return 0;
drop:
	rcu_read_unlock_bh();
	amt->dev->stats.rx_dropped++;
	return 0;
}
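
/* Create the kernel UDP socket (bound to INADDR_ANY and the relay
 * port) that carries all AMT control and data traffic for this
 * device.
 */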
static struct socket *amt_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
	udp_conf.family = AF_INET;
	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	udp_conf.local_udp_port = port;

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
static int amt_socket_create(struct amt_dev *amt)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = amt_create_sock(amt->net, amt->relay_port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = amt;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = amt_rcv;
	tunnel_cfg.encap_err_lookup = amt_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);

	rcu_assign_pointer(amt->sock, sock);
	return 0;
}
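
/* ndo_open: (re)create the UDP tunnel socket, reset per-run state and
 * generate a fresh siphash key.  A gateway immediately kicks off the
 * discovery and request state machines; a relay only arms the
 * periodic secret (key) refresh work.
 */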
static int amt_dev_open(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->ready4 = false;
	amt->ready6 = false;

	err = amt_socket_create(amt);
	if (err)
		return err;

	amt->req_cnt = 0;
	amt->remote_ip = 0;
	get_random_bytes(&amt->key, sizeof(siphash_key_t));

	amt->status = AMT_STATUS_INIT;
	if (amt->mode == AMT_MODE_GATEWAY) {
		mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
		mod_delayed_work(amt_wq, &amt->req_wq, 0);
	} else if (amt->mode == AMT_MODE_RELAY) {
		mod_delayed_work(amt_wq, &amt->secret_wq,
				 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
	}
	return err;
}
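
/* ndo_stop: tear down in the reverse order of amt_dev_open - cancel
 * all delayed work, detach and release the UDP socket, then free all
 * relay-side tunnel state.
 */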
static int amt_dev_stop(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	struct amt_tunnel_list *tunnel, *tmp;
	struct socket *sock;

	cancel_delayed_work_sync(&amt->req_wq);
	cancel_delayed_work_sync(&amt->discovery_wq);
	cancel_delayed_work_sync(&amt->secret_wq);

	/* shutdown */
	sock = rtnl_dereference(amt->sock);
	RCU_INIT_POINTER(amt->sock, NULL);
	synchronize_rcu();
	if (sock)
		udp_tunnel_sock_release(sock);

	amt->ready4 = false;
	amt->ready6 = false;
	amt->req_cnt = 0;
	amt->remote_ip = 0;

	list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
		list_del_rcu(&tunnel->list);
		amt->nr_tunnels--;
		cancel_delayed_work_sync(&tunnel->gc_wq);
		amt_clear_groups(tunnel);
		kfree_rcu(tunnel, rcu);
	}

	return 0;
}
static const struct device_type amt_type = {
	.name = "amt",
};
static int amt_dev_init(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->dev = dev;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&amt->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void amt_dev_uninit(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	gro_cells_destroy(&amt->gro_cells);
	free_percpu(dev->tstats);
}
static const struct net_device_ops amt_netdev_ops = {
	.ndo_init		= amt_dev_init,
	.ndo_uninit		= amt_dev_uninit,
	.ndo_open		= amt_dev_open,
	.ndo_stop		= amt_dev_stop,
	.ndo_start_xmit		= amt_dev_xmit,
	.ndo_get_stats64	= dev_get_tstats64,
};
static void amt_link_setup(struct net_device *dev)
{
	dev->netdev_ops = &amt_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &amt_type);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	eth_hw_addr_random(dev);
	eth_zero_addr(dev->broadcast);
}
static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
	[IFLA_AMT_MODE]		= { .type = NLA_U32 },
	[IFLA_AMT_RELAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_GATEWAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_LINK]		= { .type = NLA_U32 },
	[IFLA_AMT_LOCAL_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_REMOTE_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_DISCOVERY_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_MAX_TUNNELS]	= { .type = NLA_U32 },
};
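
/* Netlink validation: IFLA_AMT_LINK and IFLA_AMT_MODE are mandatory,
 * IFLA_AMT_LOCAL_IP is always required, and IFLA_AMT_DISCOVERY_IP is
 * additionally required in gateway mode.
 */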
static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!data[IFLA_AMT_LINK]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
				    "Link attribute is required");
		return -EINVAL;
	}
	if (!data[IFLA_AMT_MODE]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is required");
		return -EINVAL;
	}
	if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is not valid");
		return -EINVAL;
	}
	if (!data[IFLA_AMT_LOCAL_IP]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
				    "Local attribute is required");
		return -EINVAL;
	}
	if (!data[IFLA_AMT_DISCOVERY_IP] &&
	    nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
				    "Discovery attribute is required");
		return -EINVAL;
	}

	return 0;
}
static int amt_newlink(struct net *net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err = -EINVAL;

	amt->net = net;
	amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);

	if (data[IFLA_AMT_MAX_TUNNELS] &&
	    nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
		amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
	else
		amt->max_tunnels = AMT_MAX_TUNNELS;

	spin_lock_init(&amt->lock);
	amt->max_groups = AMT_MAX_GROUP;
	amt->max_sources = AMT_MAX_SOURCE;
	amt->hash_buckets = AMT_HSIZE;
	amt->nr_tunnels = 0;
	get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
	amt->stream_dev = dev_get_by_index(net,
					   nla_get_u32(data[IFLA_AMT_LINK]));
	if (!amt->stream_dev) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Can't find stream device");
		return -ENODEV;
	}

	if (amt->stream_dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Invalid stream device type");
		goto err;
	}

	amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
	if (ipv4_is_loopback(amt->local_ip) ||
	    ipv4_is_zeronet(amt->local_ip) ||
	    ipv4_is_multicast(amt->local_ip)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
				    "Invalid Local address");
		goto err;
	}

	if (data[IFLA_AMT_RELAY_PORT])
		amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
	else
		amt->relay_port = htons(IANA_AMT_UDP_PORT);

	if (data[IFLA_AMT_GATEWAY_PORT])
		amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
	else
		amt->gw_port = htons(IANA_AMT_UDP_PORT);

	if (!amt->relay_port) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
				    "relay port must not be 0");
		goto err;
	}

	if (amt->mode == AMT_MODE_RELAY) {
		amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
		amt->qri = 10; /* Query Response Interval */
		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_RELAY_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
	} else {
		if (!data[IFLA_AMT_DISCOVERY_IP]) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be set in gateway mode");
			goto err;
		}
		if (!amt->gw_port) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "gateway port must not be 0");
			goto err;
		}
		amt->remote_ip = 0;
		amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
		if (ipv4_is_loopback(amt->discovery_ip) ||
		    ipv4_is_zeronet(amt->discovery_ip) ||
		    ipv4_is_multicast(amt->discovery_ip)) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be unicast");
			goto err;
		}

		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_GW_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
	}
	amt->qi = AMT_INIT_QUERY_INTERVAL;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto err;
	}

	err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
	if (err < 0) {
		unregister_netdevice(dev);
		goto err;
	}

	INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
	INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
	INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
	INIT_LIST_HEAD(&amt->tunnel_list);

	return 0;
err:
	dev_put(amt->stream_dev);
	return err;
}
static void amt_dellink(struct net_device *dev, struct list_head *head)
{
	struct amt_dev *amt = netdev_priv(dev);

	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(amt->stream_dev, dev);
	dev_put(amt->stream_dev);
}
static size_t amt_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
	       nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
}
static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
		goto nla_put_failure;
	if (amt->remote_ip)
		if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
			goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops amt_link_ops __read_mostly = {
	.kind		= "amt",
	.maxtype	= IFLA_AMT_MAX,
	.policy		= amt_policy,
	.priv_size	= sizeof(struct amt_dev),
	.setup		= amt_link_setup,
	.validate	= amt_validate,
	.newlink	= amt_newlink,
	.dellink	= amt_dellink,
	.get_size	= amt_get_size,
	.fill_info	= amt_fill_info,
};
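
/* Walk the netns device list and return the AMT device stacked on
 * top of @dev, if any.  Used by the notifier below to follow
 * unregister and MTU-change events on the underlying stream device.
 */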
static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
{
	struct net_device *upper_dev;
	struct amt_dev *amt;

	for_each_netdev(dev_net(dev), upper_dev) {
		if (netif_is_amt(upper_dev)) {
			amt = netdev_priv(upper_dev);
			if (amt->stream_dev == dev)
				return upper_dev;
		}
	}

	return NULL;
}
static int amt_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper_dev;
	struct amt_dev *amt;
	LIST_HEAD(list);
	int new_mtu;

	upper_dev = amt_lookup_upper_dev(dev);
	if (!upper_dev)
		return NOTIFY_DONE;
	amt = netdev_priv(upper_dev);

	switch (event) {
	case NETDEV_UNREGISTER:
		amt_dellink(amt->dev, &list);
		unregister_netdevice_many(&list);
		break;
	case NETDEV_CHANGEMTU:
		if (amt->mode == AMT_MODE_RELAY)
			new_mtu = dev->mtu - AMT_RELAY_HLEN;
		else
			new_mtu = dev->mtu - AMT_GW_HLEN;
		dev_set_mtu(amt->dev, new_mtu);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block amt_notifier_block __read_mostly = {
	.notifier_call = amt_device_event,
};
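
/* Module init: register the netdevice notifier and rtnl link ops,
 * then start the periodic source-entry garbage collector on a
 * dedicated unbound workqueue (max_active == 1).
 */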
static int __init amt_init(void)
{
	int err;

	err = register_netdevice_notifier(&amt_notifier_block);
	if (err < 0)
		goto err;

	err = rtnl_link_register(&amt_link_ops);
	if (err < 0)
		goto unregister_notifier;

	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
	if (!amt_wq) {
		err = -ENOMEM;
		goto rtnl_unregister;
	}

	spin_lock_init(&source_gc_lock);
	spin_lock_bh(&source_gc_lock);
	INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);

	return 0;

rtnl_unregister:
	rtnl_link_unregister(&amt_link_ops);
unregister_notifier:
	unregister_netdevice_notifier(&amt_notifier_block);
err:
	pr_err("error loading AMT module\n");
	return err;
}
late_initcall(amt_init);
static void __exit amt_fini(void)
{
	rtnl_link_unregister(&amt_link_ops);
	unregister_netdevice_notifier(&amt_notifier_block);
	cancel_delayed_work_sync(&source_gc_wq);
	__amt_source_gc_work();
	destroy_workqueue(amt_wq);
}
module_exit(amt_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
MODULE_ALIAS_RTNL_LINK("amt");