// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
		       va->vid == vb->vid &&
		       va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
		       ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

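/* Illustrative example (not part of this file): a caller such as the bridge
 * sets a port attribute roughly like this. The variable names and the chosen
 * attribute are an assumed sketch, not code from this file.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, extack);
 */
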
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

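/* Illustrative example (not part of this file): adding a port VLAN object,
 * loosely modeled on how the bridge notifies VLANs to switchdev drivers. The
 * concrete values and variable names are an assumed sketch.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 100,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &vlan.obj, extack);
 */
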
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item is pending, which is
 *	equivalent to the action @nt on an object @obj.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

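/* Illustrative example (not part of this file): a switchdev driver typically
 * registers a notifier block and dispatches on the event type in its handler.
 * The foo_* names are hypothetical.
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			// this chain is atomic; defer the actual work
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */
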
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that our ports are lower interfaces of.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);

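/* Illustrative example (not part of this file): an atomic notifier handler
 * can use this helper to steer FDB events to the right ports. The foo_*
 * callbacks are hypothetical driver functions.
 *
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *	case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *							   foo_dev_check,
 *							   foo_foreign_dev_check,
 *							   foo_fdb_event);
 *		return notifier_from_errno(err);
 */
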
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

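/* Illustrative example (not part of this file): a driver's blocking notifier
 * commonly dispatches object additions through this helper. The foo_* names
 * are hypothetical.
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr,
 *						    foo_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */
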
/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

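/* Illustrative example (not part of this file): a driver usually calls this
 * when one of its ports becomes a bridge port, e.g. from its CHANGEUPPER
 * handler. The foo_* notifier blocks and the priv context are hypothetical.
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 */
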
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);

int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);