3 * Ethernet-type device handling.
5 * Authors: Ben Greear <greearb@candelatech.com>
6 * Please send support related email to: netdev@vger.kernel.org
7 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
10 * Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
11 * Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
12 * Correct all the locking - David S. Miller <davem@redhat.com>;
13 * Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/capability.h>
24 #include <linux/module.h>
25 #include <linux/netdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/rculist.h>
30 #include <net/p8022.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/notifier.h>
34 #include <net/rtnetlink.h>
35 #include <net/net_namespace.h>
36 #include <net/netns/generic.h>
37 #include <linux/uaccess.h>
39 #include <linux/if_vlan.h>
43 #define DRV_VERSION "1.8"
45 /* Global VLAN variables */
/* Per-network-namespace slot id; used with net_generic() to reach the
 * module's struct vlan_net (see vlan_init_net()/vlan_ioctl_handler()).
 */
47 unsigned int vlan_net_id __read_mostly;
/* Module identification strings, printed at load time in vlan_proto_init(). */
49 const char vlan_fullname[] = "802.1Q VLAN Support";
50 const char vlan_version[] = DRV_VERSION;
52 /* End of global variables definitions. */
/* Ensure the pointer array covering (vlan_proto, vlan_id) exists in @vg,
 * allocating a zeroed VLAN_GROUP_ARRAY_PART_LEN-entry block of net_device
 * pointers on demand.  Returns 0 on success, negative errno on allocation
 * failure — presumably; the early return for an already-allocated part and
 * the kzalloc() failure check are elided in this view, confirm against the
 * complete source.
 */
54 static int vlan_group_prealloc_vid(struct vlan_group *vg,
55 __be16 vlan_proto, u16 vlan_id)
57 struct net_device **array;
58 unsigned int pidx, vidx;
/* Index first by protocol (802.1Q vs 802.1ad), then by which
 * VLAN_GROUP_ARRAY_PART_LEN-sized part the vid falls into.
 */
63 pidx = vlan_proto_idx(vlan_proto);
64 vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
65 array = vg->vlan_devices_arrays[pidx][vidx];
69 size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
70 array = kzalloc(size, GFP_KERNEL);
74 vg->vlan_devices_arrays[pidx][vidx] = array;
/* Tear down one VLAN device: leave the MVRP/GVRP applicants, drop the
 * device from its group, unlink it from the real device and queue it for
 * unregistration on @head.  Caller holds RTNL (rtnl_dereference() below
 * relies on it).
 */
78 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
80 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
81 struct net_device *real_dev = vlan->real_dev;
82 struct vlan_info *vlan_info;
83 struct vlan_group *grp;
84 u16 vlan_id = vlan->vlan_id;
88 vlan_info = rtnl_dereference(real_dev->vlan_info);
91 grp = &vlan_info->grp;
/* Announce our departure to the MRP applicants before unlinking. */
95 if (vlan->flags & VLAN_FLAG_MVRP)
96 vlan_mvrp_request_leave(dev);
97 if (vlan->flags & VLAN_FLAG_GVRP)
98 vlan_gvrp_request_leave(dev);
100 vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
102 netdev_upper_dev_unlink(real_dev, dev);
103 /* Because unregister_netdevice_queue() makes sure at least one rcu
104 * grace period is respected before device freeing,
105 * we don't need to call synchronize_net() here.
107 unregister_netdevice_queue(dev, head);
/* Last VLAN on this real_dev: shut the MRP applicants down entirely. */
109 if (grp->nr_vlan_devs == 0) {
110 vlan_mvrp_uninit_applicant(real_dev);
111 vlan_gvrp_uninit_applicant(real_dev);
/* Drop the VID from the real device's (possibly hardware) filter. */
114 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
116 /* Get rid of the vlan's reference to real_dev */
/* Validate that @real_dev can host a VLAN with (@protocol, @vlan_id):
 * the device must not be VLAN-challenged and the id must not already be
 * in use on it.  Returns 0 if usable, negative errno otherwise (the
 * actual return statements are elided in this view).
 */
120 int vlan_check_real_dev(struct net_device *real_dev,
121 __be16 protocol, u16 vlan_id,
122 struct netlink_ext_ack *extack)
124 const char *name = real_dev->name;
/* Some drivers explicitly refuse VLAN stacking. */
126 if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
127 pr_info("VLANs not supported on %s\n", name);
128 NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
/* A duplicate (protocol, vid) on the same lower device is rejected. */
132 if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
133 NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
/* Register a fully set-up VLAN net_device and hook it into the real
 * device's vlan_info/vlan_group bookkeeping.  For the first VLAN on a
 * real device this also starts the GVRP/MVRP applicants.  Unwinds via
 * the out_* labels on failure.  Returns 0 or negative errno.
 */
140 int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
142 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
143 struct net_device *real_dev = vlan->real_dev;
144 u16 vlan_id = vlan->vlan_id;
145 struct vlan_info *vlan_info;
146 struct vlan_group *grp;
/* Add the VID to the lower device's filter; per the comment below this
 * also creates real_dev->vlan_info if it did not exist yet.
 */
149 err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
153 vlan_info = rtnl_dereference(real_dev->vlan_info);
154 /* vlan_info should be there now. vlan_vid_add took care of it */
157 grp = &vlan_info->grp;
/* First VLAN on this real device: bring up the MRP applicants. */
158 if (grp->nr_vlan_devs == 0) {
159 err = vlan_gvrp_init_applicant(real_dev);
162 err = vlan_mvrp_init_applicant(real_dev);
164 goto out_uninit_gvrp;
167 err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
169 goto out_uninit_mvrp;
171 vlan->nest_level = dev_get_nest_level(real_dev) + 1;
172 err = register_netdevice(dev);
174 goto out_uninit_mvrp;
176 err = netdev_upper_dev_link(real_dev, dev, extack);
178 goto out_unregister_netdev;
180 /* Account for reference in struct vlan_dev_priv */
/* Mirror the lower device's operstate and let linkwatch publish it. */
183 netif_stacked_transfer_operstate(real_dev, dev);
184 linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
186 /* So, got the sucker initialized, now lets place
187 * it into our local structure.
189 vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
/* Error unwinding: only uninit the applicants if no other VLAN device
 * on this real_dev still needs them.
 */
194 out_unregister_netdev:
195 unregister_netdevice(dev);
197 if (grp->nr_vlan_devs == 0)
198 vlan_mvrp_uninit_applicant(real_dev);
200 if (grp->nr_vlan_devs == 0)
201 vlan_gvrp_uninit_applicant(real_dev);
203 vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
207 /* Attach a VLAN device to a mac address (ie Ethernet Card).
208 * Returns 0 if the device was created or a negative error code otherwise.
/* ioctl-path device creation: builds the interface name according to the
 * per-netns name_type policy, allocates the net_device and registers it
 * via register_vlan_dev().  Only 802.1Q (ETH_P_8021Q) VLANs can be
 * created through this legacy path.
 */
210 static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
212 struct net_device *new_dev;
213 struct vlan_dev_priv *vlan;
214 struct net *net = dev_net(real_dev);
215 struct vlan_net *vn = net_generic(net, vlan_net_id);
219 if (vlan_id >= VLAN_VID_MASK)
222 err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
227 /* Gotta set up the fields for the device. */
228 switch (vn->name_type) {
229 case VLAN_NAME_TYPE_RAW_PLUS_VID:
230 /* name will look like: eth1.0005 */
231 snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
233 case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
234 /* Put our vlan.VID in the name.
235 * Name will look like: vlan5
237 snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
239 case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
240 /* Put our vlan.VID in the name.
241 * Name will look like: eth0.5
243 snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
245 case VLAN_NAME_TYPE_PLUS_VID:
246 /* Put our vlan.VID in the name.
247 * Name will look like: vlan0005
250 snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
253 new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
254 NET_NAME_UNKNOWN, vlan_setup);
259 dev_net_set(new_dev, net);
260 /* need 4 bytes for extra VLAN header info,
261 * hope the underlying device can handle it.
263 new_dev->mtu = real_dev->mtu;
/* Fill in the vlan-private part before registration. */
265 vlan = vlan_dev_priv(new_dev);
266 vlan->vlan_proto = htons(ETH_P_8021Q);
267 vlan->vlan_id = vlan_id;
268 vlan->real_dev = real_dev;
270 vlan->flags = VLAN_FLAG_REORDER_HDR;
272 new_dev->rtnl_link_ops = &vlan_link_ops;
273 err = register_vlan_dev(new_dev, NULL);
275 goto out_free_newdev;
/* Free the device ourselves only if registration never completed;
 * once registered, unregistration owns the final free.
 */
280 if (new_dev->reg_state == NETREG_UNINITIALIZED ||
281 new_dev->reg_state == NETREG_UNREGISTERED)
282 free_netdev(new_dev);
/* Keep the lower device's unicast address filter in sync after the real
 * device (@dev) changed its MAC address, then cache the new address in
 * vlan->real_dev_addr for the next comparison.
 */
286 static void vlan_sync_address(struct net_device *dev,
287 struct net_device *vlandev)
289 struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
291 /* May be called without an actual change */
292 if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
295 /* vlan continues to inherit address of lower device */
296 if (vlan_dev_inherit_address(vlandev, dev))
299 /* vlan address was different from the old address and is equal to
301 if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
302 ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
303 dev_uc_del(dev, vlandev->dev_addr);
305 /* vlan address was equal to the old address and is different from
307 if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
308 !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
309 dev_uc_add(dev, vlandev->dev_addr);
/* Remember the lower device's current address for future syncs. */
312 ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
/* Propagate offload-related attributes from the real device (@dev) to
 * one of its vlan devices and recompute the vlan device's feature set.
 */
315 static void vlan_transfer_features(struct net_device *dev,
316 struct net_device *vlandev)
318 struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
320 vlandev->gso_max_size = dev->gso_max_size;
321 vlandev->gso_max_segs = dev->gso_max_segs;
/* With hardware tag insertion the vlan header costs no extra headroom;
 * otherwise reserve VLAN_HLEN on top of the lower header length.
 */
323 if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
324 vlandev->hard_header_len = dev->hard_header_len;
326 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
328 #if IS_ENABLED(CONFIG_FCOE)
329 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
/* Inherit exactly the lower device's IFF_XMIT_DST_RELEASE setting. */
332 vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
333 vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
335 netdev_update_features(vlandev);
/* Notifier helper for events on a vlan device itself: keep its /proc
 * entry in step with register/unregister/rename.  Returns 0 or a
 * negative errno from vlan_proc_add_dev().
 */
338 static int __vlan_device_event(struct net_device *dev, unsigned long event)
343 case NETDEV_CHANGENAME:
/* Re-create the proc entry under the device's new name. */
344 vlan_proc_rem_dev(dev);
345 err = vlan_proc_add_dev(dev);
347 case NETDEV_REGISTER:
348 err = vlan_proc_add_dev(dev);
350 case NETDEV_UNREGISTER:
351 vlan_proc_rem_dev(dev);
/* Netdevice notifier: reacts to events on real (lower) devices and
 * propagates state, address, MTU and feature changes to every vlan
 * device stacked on top of them.  Runs under RTNL.
 */
358 static int vlan_device_event(struct notifier_block *unused, unsigned long event,
361 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
362 struct vlan_group *grp;
363 struct vlan_info *vlan_info;
365 struct net_device *vlandev;
366 struct vlan_dev_priv *vlan;
/* Events on a vlan device itself only need /proc bookkeeping. */
371 if (is_vlan_dev(dev)) {
372 int err = __vlan_device_event(dev, event);
375 return notifier_from_errno(err);
/* Keep VID 0 programmed on devices with hardware CTAG filtering so
 * priority-tagged (VID 0) frames are still received.
 */
378 if ((event == NETDEV_UP) &&
379 (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
380 pr_info("adding VLAN 0 to HW filter on device %s\n",
382 vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
384 if (event == NETDEV_DOWN &&
385 (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
386 vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
388 vlan_info = rtnl_dereference(dev->vlan_info);
391 grp = &vlan_info->grp;
393 /* It is OK that we do not hold the group lock right now,
394 * as we run under the RTNL lock.
399 /* Propagate real device state to vlan devices */
400 vlan_group_for_each_dev(grp, i, vlandev)
401 netif_stacked_transfer_operstate(dev, vlandev);
404 case NETDEV_CHANGEADDR:
405 /* Adjust unicast filters on underlying device */
406 vlan_group_for_each_dev(grp, i, vlandev) {
407 flgs = vlandev->flags;
408 if (!(flgs & IFF_UP))
411 vlan_sync_address(dev, vlandev);
/* Shrink vlan MTUs that no longer fit the lower device. */
415 case NETDEV_CHANGEMTU:
416 vlan_group_for_each_dev(grp, i, vlandev) {
417 if (vlandev->mtu <= dev->mtu)
420 dev_set_mtu(vlandev, dev->mtu);
424 case NETDEV_FEAT_CHANGE:
425 /* Propagate device features to underlying device */
426 vlan_group_for_each_dev(grp, i, vlandev)
427 vlan_transfer_features(dev, vlandev);
/* NOTE(review): the case label is elided in this view — presumably
 * NETDEV_DOWN: close all stacked vlans unless loosely bound.
 */
431 struct net_device *tmp;
432 LIST_HEAD(close_list);
434 /* Put all VLANs for this dev in the down state too. */
435 vlan_group_for_each_dev(grp, i, vlandev) {
436 flgs = vlandev->flags;
437 if (!(flgs & IFF_UP))
440 vlan = vlan_dev_priv(vlandev);
441 if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
442 list_add(&vlandev->close_list, &close_list);
445 dev_close_many(&close_list, false);
/* Sync operstate after the batch close, then detach the list. */
447 list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
448 netif_stacked_transfer_operstate(dev, vlandev);
449 list_del_init(&vlandev->close_list);
451 list_del(&close_list);
/* NOTE(review): case label elided — presumably NETDEV_UP. */
455 /* Put all VLANs for this dev in the up state too. */
456 vlan_group_for_each_dev(grp, i, vlandev) {
457 flgs = dev_get_flags(vlandev);
461 vlan = vlan_dev_priv(vlandev);
462 if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
463 dev_change_flags(vlandev, flgs | IFF_UP);
464 netif_stacked_transfer_operstate(dev, vlandev);
468 case NETDEV_UNREGISTER:
469 /* twiddle thumbs on netns device moves */
470 if (dev->reg_state != NETREG_UNREGISTERING)
/* Unregister every stacked vlan in one batch via @list. */
473 vlan_group_for_each_dev(grp, i, vlandev) {
474 /* removal of last vid destroys vlan_info, abort
476 if (vlan_info->nr_vids == 1)
479 unregister_vlan_dev(vlandev, &list);
483 unregister_netdevice_many(&list);
486 case NETDEV_PRE_TYPE_CHANGE:
487 /* Forbid underlaying device to change its type. */
488 if (vlan_uses_dev(dev))
492 case NETDEV_NOTIFY_PEERS:
493 case NETDEV_BONDING_FAILOVER:
494 case NETDEV_RESEND_IGMP:
495 /* Propagate to vlan devices */
496 vlan_group_for_each_dev(grp, i, vlandev)
497 call_netdevice_notifiers(event, vlandev);
/* Push/drop the full VID set into/out of an auxiliary filter, per
 * protocol (CTAG = 802.1Q, STAG = 802.1ad).
 */
500 case NETDEV_CVLAN_FILTER_PUSH_INFO:
501 err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
503 return notifier_from_errno(err);
506 case NETDEV_CVLAN_FILTER_DROP_INFO:
507 vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
510 case NETDEV_SVLAN_FILTER_PUSH_INFO:
511 err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
513 return notifier_from_errno(err);
516 case NETDEV_SVLAN_FILTER_DROP_INFO:
517 vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
/* Registered in vlan_proto_init(); dispatches to vlan_device_event(). */
525 static struct notifier_block vlan_notifier_block __read_mostly = {
526 .notifier_call = vlan_device_event,
530 * VLAN IOCTL handler.
531 * o execute requested action or pass command to the device driver
532 * arg is really a struct vlan_ioctl_args __user *.
/* Legacy (vconfig-era) configuration entry point.  Copies the argument
 * block from userspace, resolves the named device where the command
 * needs one, checks CAP_NET_ADMIN for mutating commands, and
 * dispatches.  Returns 0 or negative errno.
 */
534 static int vlan_ioctl_handler(struct net *net, void __user *arg)
537 struct vlan_ioctl_args args;
538 struct net_device *dev = NULL;
540 if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
543 /* Null terminate this sucker, just in case. */
544 args.device1[sizeof(args.device1) - 1] = 0;
545 args.u.device2[sizeof(args.u.device2) - 1] = 0;
/* Commands in this group operate on a named device: look it up. */
550 case SET_VLAN_INGRESS_PRIORITY_CMD:
551 case SET_VLAN_EGRESS_PRIORITY_CMD:
552 case SET_VLAN_FLAG_CMD:
555 case GET_VLAN_REALDEV_NAME_CMD:
556 case GET_VLAN_VID_CMD:
558 dev = __dev_get_by_name(net, args.device1);
/* All device commands except ADD operate on an existing vlan device. */
563 if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
568 case SET_VLAN_INGRESS_PRIORITY_CMD:
570 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
572 vlan_dev_set_ingress_priority(dev,
578 case SET_VLAN_EGRESS_PRIORITY_CMD:
580 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
582 err = vlan_dev_set_egress_priority(dev,
587 case SET_VLAN_FLAG_CMD:
589 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
591 err = vlan_dev_change_flags(dev,
592 args.vlan_qos ? args.u.flag : 0,
596 case SET_VLAN_NAME_TYPE_CMD:
598 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
600 if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
/* The interface-naming policy is per network namespace. */
603 vn = net_generic(net, vlan_net_id);
604 vn->name_type = args.u.name_type;
613 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
615 err = register_vlan_device(dev, args.u.VID);
620 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
622 unregister_vlan_dev(dev, NULL);
626 case GET_VLAN_REALDEV_NAME_CMD:
628 vlan_dev_get_realdev_name(dev, args.u.device2);
629 if (copy_to_user(arg, &args,
630 sizeof(struct vlan_ioctl_args)))
634 case GET_VLAN_VID_CMD:
636 args.u.VID = vlan_dev_vlan_id(dev);
637 if (copy_to_user(arg, &args,
638 sizeof(struct vlan_ioctl_args)))
/* GRO receive for VLAN-tagged frames: linearize the vlan header, mark
 * held packets whose vlan headers differ as not-same-flow, then hand
 * off to the offload handler of the encapsulated protocol.
 */
651 static struct sk_buff *vlan_gro_receive(struct list_head *head,
654 const struct packet_offload *ptype;
655 unsigned int hlen, off_vlan;
656 struct sk_buff *pp = NULL;
657 struct vlan_hdr *vhdr;
/* Make sure the vlan header is available in the GRO header area. */
662 off_vlan = skb_gro_offset(skb);
663 hlen = off_vlan + sizeof(*vhdr);
664 vhdr = skb_gro_header_fast(skb, off_vlan);
665 if (skb_gro_header_hard(skb, hlen)) {
666 vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
671 type = vhdr->h_vlan_encapsulated_proto;
674 ptype = gro_find_receive_by_type(type);
/* Packets are only the same flow if their vlan headers match too. */
680 list_for_each_entry(p, head, list) {
681 struct vlan_hdr *vhdr2;
683 if (!NAPI_GRO_CB(p)->same_flow)
686 vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
687 if (compare_vlan_header(vhdr, vhdr2))
688 NAPI_GRO_CB(p)->same_flow = 0;
/* Strip the vlan header from the GRO view and recurse inward. */
691 skb_gro_pull(skb, sizeof(*vhdr));
692 skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
693 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
698 skb_gro_flush_final(skb, pp, flush);
/* GRO complete: finish the merged skb by delegating to the offload
 * handler of the protocol encapsulated behind the vlan header at
 * @nhoff.  Returns the handler's result or a negative errno (the
 * default/return path is elided in this view).
 */
703 static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
705 struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
706 __be16 type = vhdr->h_vlan_encapsulated_proto;
707 struct packet_offload *ptype;
711 ptype = gro_find_complete_by_type(type);
713 err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
/* GRO offload handlers for both VLAN ethertypes: 802.1Q (CTAG) and
 * 802.1ad (STAG); installed/removed in module init/exit.
 */
719 static struct packet_offload vlan_packet_offloads[] __read_mostly = {
721 .type = cpu_to_be16(ETH_P_8021Q),
724 .gro_receive = vlan_gro_receive,
725 .gro_complete = vlan_gro_complete,
729 .type = cpu_to_be16(ETH_P_8021AD),
732 .gro_receive = vlan_gro_receive,
733 .gro_complete = vlan_gro_complete,
/* Per-netns init: set the default ioctl interface-naming policy and
 * create the namespace's /proc/net/vlan entries.
 */
738 static int __net_init vlan_init_net(struct net *net)
740 struct vlan_net *vn = net_generic(net, vlan_net_id);
743 vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
745 err = vlan_proc_init(net);
/* Per-netns teardown: remove the namespace's /proc/net/vlan entries. */
750 static void __net_exit vlan_exit_net(struct net *net)
752 vlan_proc_cleanup(net);
/* Pernet operations; .size makes the core allocate a zeroed
 * struct vlan_net for each namespace at the vlan_net_id slot.
 */
755 static struct pernet_operations vlan_net_ops = {
756 .init = vlan_init_net,
757 .exit = vlan_exit_net,
759 .size = sizeof(struct vlan_net),
/* Module init: register the pernet subsystem, the netdevice notifier,
 * GVRP/MVRP, rtnetlink ops and the GRO offloads, then install the
 * legacy ioctl hook.  The error labels unwind in reverse order (the
 * labels and checks between steps are elided in this view).
 */
762 static int __init vlan_proto_init(void)
767 pr_info("%s v%s\n", vlan_fullname, vlan_version);
769 err = register_pernet_subsys(&vlan_net_ops);
773 err = register_netdevice_notifier(&vlan_notifier_block);
777 err = vlan_gvrp_init();
781 err = vlan_mvrp_init();
785 err = vlan_netlink_init();
789 for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
790 dev_add_offload(&vlan_packet_offloads[i]);
792 vlan_ioctl_set(vlan_ioctl_handler);
800 unregister_netdevice_notifier(&vlan_notifier_block);
802 unregister_pernet_subsys(&vlan_net_ops);
/* Module exit: undo everything vlan_proto_init() set up, in reverse
 * order, then wait out pending call_rcu() callbacks before unload.
 */
807 static void __exit vlan_cleanup_module(void)
811 vlan_ioctl_set(NULL);
813 for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
814 dev_remove_offload(&vlan_packet_offloads[i]);
818 unregister_netdevice_notifier(&vlan_notifier_block);
820 unregister_pernet_subsys(&vlan_net_ops);
821 rcu_barrier(); /* Wait for completion of call_rcu()'s */
/* Module entry/exit points and metadata. */
827 module_init(vlan_proto_init);
828 module_exit(vlan_cleanup_module);
830 MODULE_LICENSE("GPL");
831 MODULE_VERSION(DRV_VERSION);