// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
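
/* Example usage (illustrative only; assumes devices eth0 and eth1 exist):
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol all matchall \
 *		action mirred egress redirect dev eth1
 *
 * This steals every packet arriving on eth0 and transmits it out of eth1;
 * substituting "mirror" for "redirect" sends a copy instead, letting the
 * original continue up the stack.
 */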

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

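/* A mirred target's datapath can run another mirred instance, so a packet
 * can re-enter this code; bound the per-CPU nesting depth to avoid a loop.
 */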
#define MIRRED_RECURSION_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

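/* These control-action verdicts tell the caller it must not keep using the
 * skb, so a redirect may hand over the original packet instead of cloning.
 */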
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	dev_put_track(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

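/* Parse the netlink attributes in 'nla' and either create a new mirred
 * instance or update the existing one identified by parm->index.
 */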
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *odev, *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
					   lockdep_is_held(&m->tcf_lock));
		dev_put_track(odev, &m->tcfm_dev_tracker);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

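/* Deliver the packet along its new path: queue it for transmission on the
 * target device, or feed it to the ingress stack when ingress is wanted.
 */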
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else
		err = netif_receive_skb(skb);

	return err;
}

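/* Per-packet action handler: mirror or redirect the skb to the configured
 * target device, cloning it unless the original can be handed over.
 */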
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* lets the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			err = tcf_mirred_forward(res->ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

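/* Fold counters reported by offloading drivers into the software stats. */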
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

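/* On NETDEV_UNREGISTER, walk every mirred instance and drop any stale
 * reference to the departing device; the action itself stays alive and
 * will report "target device is gone" if it is still matched afterwards.
 */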
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put_track(dev, &m->tcfm_dev_tracker);
				/* Note : no rcu grace period necessary, as
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

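/* Used by the offload path: take a reference on the target device under
 * RCU and return a destructor that releases it.
 */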
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

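/* Translate a mirred action into its flow_action equivalent for hardware
 * offload, covering both rule binding and standalone action offload.
 */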
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.offload_act_setup =	tcf_mirred_offload_act_setup,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);