1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/sch_api.c Packet scheduler API.
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
10 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
11 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
29 #include <net/net_namespace.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
35 #include <trace/events/qdisc.h>
42 This file consists of two interrelated parts:
44 1. queueing disciplines manager frontend.
45 2. traffic classes manager frontend.
47 Generally, a queueing discipline ("qdisc") is a black box,
48 which is able to enqueue packets and to dequeue them (when
49 the device is ready to send something) in order and at times
50 determined by the algorithm hidden in it.
52 Qdiscs are divided into two categories:
53 - "queues", which have no internal structure visible from outside.
54 - "schedulers", which split all the packets into "traffic classes",
55 using "packet classifiers" (see cls_api.c)
57 In turn, classes may have child qdiscs (as a rule, queues)
58 attached to them, and so on.
60 The goal of the routines in this file is to translate
61 the information supplied by the user in the form of handles
62 into a form more intelligible to the kernel, to perform some sanity
63 checks and the part of the work that is common to all qdiscs,
64 and to provide rtnetlink notifications.
66 All real intelligent work is done inside qdisc modules.
70 Every discipline has two major routines: enqueue and dequeue.
74 dequeue usually returns a skb to send. It is allowed to return NULL,
75 but that does not mean that the queue is empty, it just means that the
76 discipline does not want to send anything this time.
77 The queue is really empty only if q->q.qlen == 0.
78 For complicated disciplines with multiple queues, q->q is not the
79 real packet queue, but q->q.qlen must still be valid.
83 enqueue returns 0 if the packet was enqueued successfully.
84 If a packet (this one or another one) was dropped, it returns
86 NET_XMIT_DROP - this packet was dropped.
87 Expected action: do not back off, but wait until the queue clears.
88 NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
89 Expected action: back off or ignore.
95 like dequeue but without removing a packet from the queue
99 returns qdisc to initial state: purge all buffers, clear all
100 timers, counters (except for statistics) etc.
104 initializes newly created qdisc.
108 destroys resources allocated by init and during lifetime of qdisc.
112 changes qdisc parameters.
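   As an illustrative sketch only (not part of this file; the "example_"
   names are hypothetical), the smallest useful enqueue/dequeue pair is a
   plain FIFO built on the helpers from <net/sch_generic.h>:

	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				   struct sk_buff **to_free)
	{
		/* accept until the configured limit, then drop this packet */
		if (likely(sch->q.qlen < sch->limit))
			return qdisc_enqueue_tail(skb, sch);
		return qdisc_drop(skb, sch, to_free);
	}

	static struct sk_buff *example_dequeue(struct Qdisc *sch)
	{
		/* hand the oldest queued packet back to the device */
		return qdisc_dequeue_head(sch);
	}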
115 /* Protects the list of registered TC modules. It is a pure SMP lock. */
116 static DEFINE_RWLOCK(qdisc_mod_lock);
119 /************************************************
120 * Queueing disciplines manipulation. *
121 ************************************************/
124 /* The list of all installed queueing disciplines. */
126 static struct Qdisc_ops *qdisc_base;
128 /* Register/unregister queueing discipline */
130 int register_qdisc(struct Qdisc_ops *qops)
132 struct Qdisc_ops *q, **qp;
135 write_lock(&qdisc_mod_lock);
136 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
137 if (!strcmp(qops->id, q->id))
140 if (qops->enqueue == NULL)
141 qops->enqueue = noop_qdisc_ops.enqueue;
142 if (qops->peek == NULL) {
143 if (qops->dequeue == NULL)
144 qops->peek = noop_qdisc_ops.peek;
148 if (qops->dequeue == NULL)
149 qops->dequeue = noop_qdisc_ops.dequeue;
152 const struct Qdisc_class_ops *cops = qops->cl_ops;
154 if (!(cops->find && cops->walk && cops->leaf))
157 if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
165 write_unlock(&qdisc_mod_lock);
172 EXPORT_SYMBOL(register_qdisc);
174 int unregister_qdisc(struct Qdisc_ops *qops)
176 struct Qdisc_ops *q, **qp;
179 write_lock(&qdisc_mod_lock);
180 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
188 write_unlock(&qdisc_mod_lock);
191 EXPORT_SYMBOL(unregister_qdisc);
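/* Illustrative sketch (hypothetical "example" module, not part of this file):
 * a qdisc module pairs register_qdisc()/unregister_qdisc() with its module
 * init/exit hooks, letting the core above fill in no-op defaults for any ops
 * it leaves out (enqueue, dequeue, peek):
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= 0,
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 */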
193 /* Get default qdisc if not otherwise specified */
194 void qdisc_get_default(char *name, size_t len)
196 read_lock(&qdisc_mod_lock);
197 strlcpy(name, default_qdisc_ops->id, len);
198 read_unlock(&qdisc_mod_lock);
201 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
203 struct Qdisc_ops *q = NULL;
205 for (q = qdisc_base; q; q = q->next) {
206 if (!strcmp(name, q->id)) {
207 if (!try_module_get(q->owner))
216 /* Set new default qdisc to use */
217 int qdisc_set_default(const char *name)
219 const struct Qdisc_ops *ops;
221 if (!capable(CAP_NET_ADMIN))
224 write_lock(&qdisc_mod_lock);
225 ops = qdisc_lookup_default(name);
227 /* Not found, drop lock and try to load module */
228 write_unlock(&qdisc_mod_lock);
229 request_module("sch_%s", name);
230 write_lock(&qdisc_mod_lock);
232 ops = qdisc_lookup_default(name);
236 /* Set new default */
237 module_put(default_qdisc_ops->owner);
238 default_qdisc_ops = ops;
240 write_unlock(&qdisc_mod_lock);
242 return ops ? 0 : -ENOENT;
245 #ifdef CONFIG_NET_SCH_DEFAULT
246 /* Set default value from kernel config */
247 static int __init sch_default_qdisc(void)
249 return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
251 late_initcall(sch_default_qdisc);
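/* At boot the call above amounts to, e.g., qdisc_set_default("fq_codel") when
 * CONFIG_DEFAULT_NET_SCH="fq_codel"; the same helper can also be reached
 * later, for instance from the net.core.default_qdisc sysctl handler, to
 * switch the default at runtime.
 */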
254 /* We know the handle. Find the qdisc among all qdiscs attached to the device
255 * (root qdisc, all its children, children of children, etc.)
256 * Note: caller either uses rtnl or rcu_read_lock()
259 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
263 if (!qdisc_dev(root))
264 return (root->handle == handle ? root : NULL);
266 if (!(root->flags & TCQ_F_BUILTIN) &&
267 root->handle == handle)
270 hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
271 lockdep_rtnl_is_held()) {
272 if (q->handle == handle)
278 void qdisc_hash_add(struct Qdisc *q, bool invisible)
280 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
282 hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
284 q->flags |= TCQ_F_INVISIBLE;
287 EXPORT_SYMBOL(qdisc_hash_add);
289 void qdisc_hash_del(struct Qdisc *q)
291 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
293 hash_del_rcu(&q->hash);
296 EXPORT_SYMBOL(qdisc_hash_del);
298 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
304 q = qdisc_match_from_root(dev->qdisc, handle);
308 if (dev_ingress_queue(dev))
309 q = qdisc_match_from_root(
310 dev_ingress_queue(dev)->qdisc_sleeping,
316 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
318 struct netdev_queue *nq;
323 q = qdisc_match_from_root(dev->qdisc, handle);
327 nq = dev_ingress_queue_rcu(dev);
329 q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
334 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
337 const struct Qdisc_class_ops *cops = p->ops->cl_ops;
341 cl = cops->find(p, classid);
345 return cops->leaf(p, cl);
348 /* Find queueing discipline by name */
350 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
352 struct Qdisc_ops *q = NULL;
355 read_lock(&qdisc_mod_lock);
356 for (q = qdisc_base; q; q = q->next) {
357 if (nla_strcmp(kind, q->id) == 0) {
358 if (!try_module_get(q->owner))
363 read_unlock(&qdisc_mod_lock);
368 /* The linklayer setting was not transferred from older iproute2
369 * versions, and the rate table lookup system has been dropped from
370 * the kernel. To keep backward compatibility with older iproute2 tc
371 * utils, we detect the linklayer setting by checking whether the rate
372 * table was modified.
374 * For linklayer ATM table entries, the rate table will be aligned to
375 * 48 bytes, thus some table entries will contain the same value. The
376 * mpu (min packet unit) is also encoded into the old rate table, thus
377 * starting from the mpu, we find low and high table entries for
378 * mapping this cell. If these entries contain the same value, then
379 * the rate table has been modified for linklayer ATM.
381 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
382 * then rounding up to the next cell and calculating the table entry one below,
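 * For example (hypothetical parameters, for illustration only): with mpu = 0
 * and cell_log = 3, low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48,
 * so cell_low = 0 and cell_high = (48 >> 3) - 1 = 5.  On an ATM-modified
 * table every size from 1 to 48 bytes costs one 53-byte cell, so rtab[0] and
 * rtab[5] are equal and TC_LINKLAYER_ATM is detected; on an unmodified
 * Ethernet table they differ.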
385 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
387 int low = roundup(r->mpu, 48);
388 int high = roundup(low+1, 48);
389 int cell_low = low >> r->cell_log;
390 int cell_high = (high >> r->cell_log) - 1;
392 /* rtab is too inaccurate at rates > 100Mbit/s */
393 if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
394 pr_debug("TC linklayer: Giving up ATM detection\n");
395 return TC_LINKLAYER_ETHERNET;
398 if ((cell_high > cell_low) && (cell_high < 256)
399 && (rtab[cell_low] == rtab[cell_high])) {
400 pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
401 cell_low, cell_high, rtab[cell_high]);
402 return TC_LINKLAYER_ATM;
404 return TC_LINKLAYER_ETHERNET;
407 static struct qdisc_rate_table *qdisc_rtab_list;
409 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
411 struct netlink_ext_ack *extack)
413 struct qdisc_rate_table *rtab;
415 if (tab == NULL || r->rate == 0 ||
416 r->cell_log == 0 || r->cell_log >= 32 ||
417 nla_len(tab) != TC_RTAB_SIZE) {
418 NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
422 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
423 if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
424 !memcmp(&rtab->data, nla_data(tab), 1024)) {
430 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
434 memcpy(rtab->data, nla_data(tab), 1024);
435 if (r->linklayer == TC_LINKLAYER_UNAWARE)
436 r->linklayer = __detect_linklayer(r, rtab->data);
437 rtab->next = qdisc_rtab_list;
438 qdisc_rtab_list = rtab;
440 NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
444 EXPORT_SYMBOL(qdisc_get_rtab);
446 void qdisc_put_rtab(struct qdisc_rate_table *tab)
448 struct qdisc_rate_table *rtab, **rtabp;
450 if (!tab || --tab->refcnt)
453 for (rtabp = &qdisc_rtab_list;
454 (rtab = *rtabp) != NULL;
455 rtabp = &rtab->next) {
463 EXPORT_SYMBOL(qdisc_put_rtab);
465 static LIST_HEAD(qdisc_stab_list);
467 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
468 [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
469 [TCA_STAB_DATA] = { .type = NLA_BINARY },
472 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
473 struct netlink_ext_ack *extack)
475 struct nlattr *tb[TCA_STAB_MAX + 1];
476 struct qdisc_size_table *stab;
477 struct tc_sizespec *s;
478 unsigned int tsize = 0;
482 err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
486 if (!tb[TCA_STAB_BASE]) {
487 NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
488 return ERR_PTR(-EINVAL);
491 s = nla_data(tb[TCA_STAB_BASE]);
494 if (!tb[TCA_STAB_DATA]) {
495 NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
496 return ERR_PTR(-EINVAL);
498 tab = nla_data(tb[TCA_STAB_DATA]);
499 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
502 if (tsize != s->tsize || (!tab && tsize > 0)) {
503 NL_SET_ERR_MSG(extack, "Invalid size of size table");
504 return ERR_PTR(-EINVAL);
507 list_for_each_entry(stab, &qdisc_stab_list, list) {
508 if (memcmp(&stab->szopts, s, sizeof(*s)))
510 if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
516 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
518 return ERR_PTR(-ENOMEM);
523 memcpy(stab->data, tab, tsize * sizeof(u16));
525 list_add_tail(&stab->list, &qdisc_stab_list);
530 void qdisc_put_stab(struct qdisc_size_table *tab)
535 if (--tab->refcnt == 0) {
536 list_del(&tab->list);
540 EXPORT_SYMBOL(qdisc_put_stab);
542 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
546 nest = nla_nest_start_noflag(skb, TCA_STAB);
548 goto nla_put_failure;
549 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
550 goto nla_put_failure;
551 nla_nest_end(skb, nest);
559 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
560 const struct qdisc_size_table *stab)
564 pkt_len = skb->len + stab->szopts.overhead;
565 if (unlikely(!stab->szopts.tsize))
568 slot = pkt_len + stab->szopts.cell_align;
569 if (unlikely(slot < 0))
572 slot >>= stab->szopts.cell_log;
573 if (likely(slot < stab->szopts.tsize))
574 pkt_len = stab->data[slot];
576 pkt_len = stab->data[stab->szopts.tsize - 1] *
577 (slot / stab->szopts.tsize) +
578 stab->data[slot % stab->szopts.tsize];
580 pkt_len <<= stab->szopts.size_log;
582 if (unlikely(pkt_len < 1))
584 qdisc_skb_cb(skb)->pkt_len = pkt_len;
586 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
588 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
590 if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
591 pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
592 txt, qdisc->ops->id, qdisc->handle >> 16);
593 qdisc->flags |= TCQ_F_WARN_NONWC;
596 EXPORT_SYMBOL(qdisc_warn_nonwc);
598 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
600 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
604 __netif_schedule(qdisc_root(wd->qdisc));
607 return HRTIMER_NORESTART;
610 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
613 hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
614 wd->timer.function = qdisc_watchdog;
617 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
619 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
621 qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
623 EXPORT_SYMBOL(qdisc_watchdog_init);
625 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
628 if (test_bit(__QDISC_STATE_DEACTIVATED,
629 &qdisc_root_sleeping(wd->qdisc)->state))
632 if (hrtimer_is_queued(&wd->timer)) {
633 /* If timer is already set in [expires, expires + delta_ns],
634 * do not reprogram it.
636 if (wd->last_expires - expires <= delta_ns)
640 wd->last_expires = expires;
641 hrtimer_start_range_ns(&wd->timer,
642 ns_to_ktime(expires),
644 HRTIMER_MODE_ABS_PINNED);
646 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
648 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
650 hrtimer_cancel(&wd->timer);
652 EXPORT_SYMBOL(qdisc_watchdog_cancel);
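/* Illustrative sketch of the usual watchdog pattern in a shaping qdisc (the
 * "example_sched_data" struct and next_tx_time_ns value are hypothetical;
 * the pattern follows qdiscs such as tbf):
 *
 *	struct example_sched_data {
 *		struct qdisc_watchdog watchdog;
 *	};
 *
 *	in ->init():		qdisc_watchdog_init(&q->watchdog, sch);
 *	in ->dequeue():		if the head packet may not be sent yet,
 *				program a wakeup and return NULL:
 *					qdisc_watchdog_schedule_ns(&q->watchdog,
 *								   next_tx_time_ns);
 *	in ->reset()/->destroy():	qdisc_watchdog_cancel(&q->watchdog);
 */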
654 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
656 struct hlist_head *h;
659 h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
662 for (i = 0; i < n; i++)
663 INIT_HLIST_HEAD(&h[i]);
668 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
670 struct Qdisc_class_common *cl;
671 struct hlist_node *next;
672 struct hlist_head *nhash, *ohash;
673 unsigned int nsize, nmask, osize;
676 /* Rehash when load factor exceeds 0.75 */
677 if (clhash->hashelems * 4 <= clhash->hashsize * 3)
679 nsize = clhash->hashsize * 2;
681 nhash = qdisc_class_hash_alloc(nsize);
685 ohash = clhash->hash;
686 osize = clhash->hashsize;
689 for (i = 0; i < osize; i++) {
690 hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
691 h = qdisc_class_hash(cl->classid, nmask);
692 hlist_add_head(&cl->hnode, &nhash[h]);
695 clhash->hash = nhash;
696 clhash->hashsize = nsize;
697 clhash->hashmask = nmask;
698 sch_tree_unlock(sch);
702 EXPORT_SYMBOL(qdisc_class_hash_grow);
704 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
706 unsigned int size = 4;
708 clhash->hash = qdisc_class_hash_alloc(size);
711 clhash->hashsize = size;
712 clhash->hashmask = size - 1;
713 clhash->hashelems = 0;
716 EXPORT_SYMBOL(qdisc_class_hash_init);
718 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
720 kvfree(clhash->hash);
722 EXPORT_SYMBOL(qdisc_class_hash_destroy);
724 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
725 struct Qdisc_class_common *cl)
729 INIT_HLIST_NODE(&cl->hnode);
730 h = qdisc_class_hash(cl->classid, clhash->hashmask);
731 hlist_add_head(&cl->hnode, &clhash->hash[h]);
734 EXPORT_SYMBOL(qdisc_class_hash_insert);
736 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
737 struct Qdisc_class_common *cl)
739 hlist_del(&cl->hnode);
742 EXPORT_SYMBOL(qdisc_class_hash_remove);
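/* Illustrative sketch of how a classful qdisc typically uses the class hash
 * (the "example_class" struct is hypothetical; the pattern follows qdiscs
 * such as htb):
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;	(classid + hash node)
 *	};
 *
 *	in ->init():
 *		err = qdisc_class_hash_init(&q->clhash);
 *
 *	lookup by classid (check the result for NULL before use):
 *		struct Qdisc_class_common *cc = qdisc_class_find(&q->clhash, classid);
 *		struct example_class *cl = container_of(cc, struct example_class, common);
 *
 *	when a new class is created:
 *		cl->common.classid = classid;
 *		qdisc_class_hash_insert(&q->clhash, &cl->common);
 *		qdisc_class_hash_grow(sch, &q->clhash);
 *
 *	on class delete and in ->destroy():
 *		qdisc_class_hash_remove(&q->clhash, &cl->common);
 *		qdisc_class_hash_destroy(&q->clhash);
 */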
744 /* Allocate a unique handle from the space managed by the kernel.
745 * Possible range is [8000-FFFF]:0000 (0x8000 values)
747 static u32 qdisc_alloc_handle(struct net_device *dev)
750 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
753 autohandle += TC_H_MAKE(0x10000U, 0);
754 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
755 autohandle = TC_H_MAKE(0x80000000U, 0);
756 if (!qdisc_lookup(dev, autohandle))
764 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
766 bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
767 const struct Qdisc_class_ops *cops;
773 if (n == 0 && len == 0)
775 drops = max_t(int, n, 0);
777 while ((parentid = sch->parent)) {
778 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
781 if (sch->flags & TCQ_F_NOPARENT)
783 /* Notify the parent qdisc only if the child qdisc becomes empty.
785 * If the child was empty even before this update then the backlog
786 * counter is screwed and we skip the notification because the
787 * parent class is already passive.
789 * If the original child was offloaded then it is allowed
790 * to be seen as empty, so the parent is notified anyway.
792 notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
793 !qdisc_is_offloaded);
794 /* TODO: perform the search on a per txq basis */
795 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
797 WARN_ON_ONCE(parentid != TC_H_ROOT);
800 cops = sch->ops->cl_ops;
801 if (notify && cops->qlen_notify) {
802 cl = cops->find(sch, parentid);
803 cops->qlen_notify(sch, cl);
806 sch->qstats.backlog -= len;
807 __qdisc_qstats_drop(sch, drops);
811 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
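/* Illustrative sketch (some_internal_dequeue() and new_limit are hypothetical):
 * a qdisc that drops packets outside its enqueue path - e.g. while applying a
 * smaller limit in ->change() - reports the delta upwards so the qlen/backlog
 * of its ancestors stays consistent:
 *
 *	unsigned int dropped = 0, dropped_len = 0;
 *
 *	while (sch->q.qlen > new_limit) {
 *		struct sk_buff *skb = some_internal_dequeue(sch);
 *
 *		dropped++;
 *		dropped_len += qdisc_pkt_len(skb);
 *		rtnl_qdisc_drop(skb, sch);
 *	}
 *	qdisc_tree_reduce_backlog(sch, dropped, dropped_len);
 */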
813 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
816 struct net_device *dev = qdisc_dev(sch);
819 sch->flags &= ~TCQ_F_OFFLOADED;
820 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
823 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
824 if (err == -EOPNOTSUPP)
828 sch->flags |= TCQ_F_OFFLOADED;
832 EXPORT_SYMBOL(qdisc_offload_dump_helper);
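/* Illustrative only: an offloadable qdisc calls this from its dump path to
 * let the driver refresh the offloaded state; a RED-style qdisc, for
 * instance, would do roughly the following (union members elided):
 *
 *	struct tc_red_qopt_offload hw_stats = {
 *		.command	= TC_RED_STATS,
 *		.handle		= sch->handle,
 *		.parent		= sch->parent,
 *		...
 *	};
 *
 *	err = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
 */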
834 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
835 struct Qdisc *new, struct Qdisc *old,
836 enum tc_setup_type type, void *type_data,
837 struct netlink_ext_ack *extack)
839 bool any_qdisc_is_offloaded;
842 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
845 err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
847 /* Don't report error if the graft is part of destroy operation. */
848 if (!err || !new || new == &noop_qdisc)
851 /* Don't report error if the parent, the old child and the new
852 * one are not offloaded.
854 any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
855 any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
856 any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
858 if (any_qdisc_is_offloaded)
859 NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
861 EXPORT_SYMBOL(qdisc_offload_graft_helper);
863 static void qdisc_offload_graft_root(struct net_device *dev,
864 struct Qdisc *new, struct Qdisc *old,
865 struct netlink_ext_ack *extack)
867 struct tc_root_qopt_offload graft_offload = {
868 .command = TC_ROOT_GRAFT,
869 .handle = new ? new->handle : 0,
870 .ingress = (new && new->flags & TCQ_F_INGRESS) ||
871 (old && old->flags & TCQ_F_INGRESS),
874 qdisc_offload_graft_helper(dev, NULL, new, old,
875 TC_SETUP_ROOT_QDISC, &graft_offload, extack);
878 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
879 u32 portid, u32 seq, u16 flags, int event)
881 struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
882 struct gnet_stats_queue __percpu *cpu_qstats = NULL;
884 struct nlmsghdr *nlh;
885 unsigned char *b = skb_tail_pointer(skb);
887 struct qdisc_size_table *stab;
892 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
895 tcm = nlmsg_data(nlh);
896 tcm->tcm_family = AF_UNSPEC;
899 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
900 tcm->tcm_parent = clid;
901 tcm->tcm_handle = q->handle;
902 tcm->tcm_info = refcount_read(&q->refcnt);
903 if (nla_put_string(skb, TCA_KIND, q->ops->id))
904 goto nla_put_failure;
905 if (q->ops->ingress_block_get) {
906 block_index = q->ops->ingress_block_get(q);
908 nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
909 goto nla_put_failure;
911 if (q->ops->egress_block_get) {
912 block_index = q->ops->egress_block_get(q);
914 nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
915 goto nla_put_failure;
917 if (q->ops->dump && q->ops->dump(q, skb) < 0)
918 goto nla_put_failure;
919 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
920 goto nla_put_failure;
921 qlen = qdisc_qlen_sum(q);
923 stab = rtnl_dereference(q->stab);
924 if (stab && qdisc_dump_stab(skb, stab) < 0)
925 goto nla_put_failure;
927 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
928 NULL, &d, TCA_PAD) < 0)
929 goto nla_put_failure;
931 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
932 goto nla_put_failure;
934 if (qdisc_is_percpu_stats(q)) {
935 cpu_bstats = q->cpu_bstats;
936 cpu_qstats = q->cpu_qstats;
939 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
940 &d, cpu_bstats, &q->bstats) < 0 ||
941 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
942 gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
943 goto nla_put_failure;
945 if (gnet_stats_finish_copy(&d) < 0)
946 goto nla_put_failure;
948 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
957 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
959 if (q->flags & TCQ_F_BUILTIN)
961 if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
967 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
968 struct nlmsghdr *n, u32 clid,
969 struct Qdisc *old, struct Qdisc *new)
972 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
974 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
978 if (old && !tc_qdisc_dump_ignore(old, false)) {
979 if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
980 0, RTM_DELQDISC) < 0)
983 if (new && !tc_qdisc_dump_ignore(new, false)) {
984 if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
985 old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
990 return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
991 n->nlmsg_flags & NLM_F_ECHO);
998 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
999 struct nlmsghdr *n, u32 clid,
1000 struct Qdisc *old, struct Qdisc *new)
1003 qdisc_notify(net, skb, n, clid, old, new);
1009 static void qdisc_clear_nolock(struct Qdisc *sch)
1011 sch->flags &= ~TCQ_F_NOLOCK;
1012 if (!(sch->flags & TCQ_F_CPUSTATS))
1015 free_percpu(sch->cpu_bstats);
1016 free_percpu(sch->cpu_qstats);
1017 sch->cpu_bstats = NULL;
1018 sch->cpu_qstats = NULL;
1019 sch->flags &= ~TCQ_F_CPUSTATS;
1022 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
1025 * When appropriate send a netlink notification using 'skb'
1028 * On success, destroy old qdisc.
1031 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1032 struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1033 struct Qdisc *new, struct Qdisc *old,
1034 struct netlink_ext_ack *extack)
1036 struct Qdisc *q = old;
1037 struct net *net = dev_net(dev);
1039 if (parent == NULL) {
1040 unsigned int i, num_q, ingress;
1043 num_q = dev->num_tx_queues;
1044 if ((q && q->flags & TCQ_F_INGRESS) ||
1045 (new && new->flags & TCQ_F_INGRESS)) {
1048 if (!dev_ingress_queue(dev)) {
1049 NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1054 if (dev->flags & IFF_UP)
1055 dev_deactivate(dev);
1057 qdisc_offload_graft_root(dev, new, old, extack);
1059 if (new && new->ops->attach)
1062 for (i = 0; i < num_q; i++) {
1063 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1066 dev_queue = netdev_get_tx_queue(dev, i);
1068 old = dev_graft_qdisc(dev_queue, new);
1070 qdisc_refcount_inc(new);
1078 notify_and_destroy(net, skb, n, classid,
1080 if (new && !new->ops->attach)
1081 qdisc_refcount_inc(new);
1082 dev->qdisc = new ? : &noop_qdisc;
1084 if (new && new->ops->attach)
1085 new->ops->attach(new);
1087 notify_and_destroy(net, skb, n, classid, old, new);
1090 if (dev->flags & IFF_UP)
1093 const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1097 /* Only support running class lockless if parent is lockless */
1098 if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1099 qdisc_clear_nolock(new);
1101 if (!cops || !cops->graft)
1104 cl = cops->find(parent, classid);
1106 NL_SET_ERR_MSG(extack, "Specified class not found");
1110 err = cops->graft(parent, cl, new, &old, extack);
1113 notify_and_destroy(net, skb, n, classid, old, new);
1118 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1119 struct netlink_ext_ack *extack)
1123 if (tca[TCA_INGRESS_BLOCK]) {
1124 block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1127 NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1130 if (!sch->ops->ingress_block_set) {
1131 NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1134 sch->ops->ingress_block_set(sch, block_index);
1136 if (tca[TCA_EGRESS_BLOCK]) {
1137 block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1140 NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1143 if (!sch->ops->egress_block_set) {
1144 NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1147 sch->ops->egress_block_set(sch, block_index);
1153 Allocate and initialize a new qdisc.
1155 Parameters are passed via opt.
1158 static struct Qdisc *qdisc_create(struct net_device *dev,
1159 struct netdev_queue *dev_queue,
1160 struct Qdisc *p, u32 parent, u32 handle,
1161 struct nlattr **tca, int *errp,
1162 struct netlink_ext_ack *extack)
1165 struct nlattr *kind = tca[TCA_KIND];
1167 struct Qdisc_ops *ops;
1168 struct qdisc_size_table *stab;
1170 ops = qdisc_lookup_ops(kind);
1171 #ifdef CONFIG_MODULES
1172 if (ops == NULL && kind != NULL) {
1173 char name[IFNAMSIZ];
1174 if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1175 /* We dropped the RTNL semaphore in order to
1176 * perform the module load. So, even if we
1177 * succeeded in loading the module we have to
1178 * tell the caller to replay the request. We
1179 * indicate this using -EAGAIN.
1180 * We replay the request because the device may
1181 * go away in the meantime.
1184 request_module("sch_%s", name);
1186 ops = qdisc_lookup_ops(kind);
1188 /* We will try qdisc_lookup_ops again,
1189 * so don't keep a reference.
1191 module_put(ops->owner);
1201 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1205 sch = qdisc_alloc(dev_queue, ops, extack);
1211 sch->parent = parent;
1213 if (handle == TC_H_INGRESS) {
1214 sch->flags |= TCQ_F_INGRESS;
1215 handle = TC_H_MAKE(TC_H_INGRESS, 0);
1218 handle = qdisc_alloc_handle(dev);
1220 NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1225 if (!netif_is_multiqueue(dev))
1226 sch->flags |= TCQ_F_ONETXQUEUE;
1229 sch->handle = handle;
1231 /* This exists to keep backward compatibility with a userspace
1232 * loophole that allowed userspace to get the IFF_NO_QUEUE
1233 * facility on older kernels by setting tx_queue_len=0 (prior
1234 * to qdisc init) and then forgetting to reinit tx_queue_len
1235 * before attaching a qdisc again.
1237 if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1238 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1239 netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1242 err = qdisc_block_indexes_set(sch, tca, extack);
1247 err = ops->init(sch, tca[TCA_OPTIONS], extack);
1252 if (tca[TCA_STAB]) {
1253 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1255 err = PTR_ERR(stab);
1258 rcu_assign_pointer(sch->stab, stab);
1260 if (tca[TCA_RATE]) {
1261 seqcount_t *running;
1264 if (sch->flags & TCQ_F_MQROOT) {
1265 NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1269 if (sch->parent != TC_H_ROOT &&
1270 !(sch->flags & TCQ_F_INGRESS) &&
1271 (!p || !(p->flags & TCQ_F_MQROOT)))
1272 running = qdisc_root_sleeping_running(sch);
1274 running = &sch->running;
1276 err = gen_new_estimator(&sch->bstats,
1283 NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1288 qdisc_hash_add(sch, false);
1289 trace_qdisc_create(ops, dev, parent);
1294 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1301 module_put(ops->owner);
1308 * Any broken qdiscs that would require an ops->reset() here?
1309 * The qdisc was never in action so it shouldn't be necessary.
1311 qdisc_put_stab(rtnl_dereference(sch->stab));
1317 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1318 struct netlink_ext_ack *extack)
1320 struct qdisc_size_table *ostab, *stab = NULL;
1323 if (tca[TCA_OPTIONS]) {
1324 if (!sch->ops->change) {
1325 NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1328 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1329 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1332 err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1337 if (tca[TCA_STAB]) {
1338 stab = qdisc_get_stab(tca[TCA_STAB], extack);
1340 return PTR_ERR(stab);
1343 ostab = rtnl_dereference(sch->stab);
1344 rcu_assign_pointer(sch->stab, stab);
1345 qdisc_put_stab(ostab);
1347 if (tca[TCA_RATE]) {
1348 /* NB: ignores errors from replace_estimator
1349 because change can't be undone. */
1350 if (sch->flags & TCQ_F_MQROOT)
1352 gen_replace_estimator(&sch->bstats,
1356 qdisc_root_sleeping_running(sch),
1363 struct check_loop_arg {
1364 struct qdisc_walker w;
1369 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1370 struct qdisc_walker *w);
1372 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1374 struct check_loop_arg arg;
1376 if (q->ops->cl_ops == NULL)
1379 arg.w.stop = arg.w.skip = arg.w.count = 0;
1380 arg.w.fn = check_loop_fn;
1383 q->ops->cl_ops->walk(q, &arg.w);
1384 return arg.w.stop ? -ELOOP : 0;
1388 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1391 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1392 struct check_loop_arg *arg = (struct check_loop_arg *)w;
1394 leaf = cops->leaf(q, cl);
1396 if (leaf == arg->p || arg->depth > 7)
1398 return check_loop(leaf, arg->p, arg->depth + 1);
1403 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1404 [TCA_KIND] = { .type = NLA_STRING },
1405 [TCA_RATE] = { .type = NLA_BINARY,
1406 .len = sizeof(struct tc_estimator) },
1407 [TCA_STAB] = { .type = NLA_NESTED },
1408 [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
1409 [TCA_CHAIN] = { .type = NLA_U32 },
1410 [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
1411 [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
1418 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1419 struct netlink_ext_ack *extack)
1421 struct net *net = sock_net(skb->sk);
1422 struct tcmsg *tcm = nlmsg_data(n);
1423 struct nlattr *tca[TCA_MAX + 1];
1424 struct net_device *dev;
1426 struct Qdisc *q = NULL;
1427 struct Qdisc *p = NULL;
1430 if ((n->nlmsg_type != RTM_GETQDISC) &&
1431 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1434 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1435 rtm_tca_policy, extack);
1439 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1443 clid = tcm->tcm_parent;
1445 if (clid != TC_H_ROOT) {
1446 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1447 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1449 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1452 q = qdisc_leaf(p, clid);
1453 } else if (dev_ingress_queue(dev)) {
1454 q = dev_ingress_queue(dev)->qdisc_sleeping;
1460 NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1464 if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1465 NL_SET_ERR_MSG(extack, "Invalid handle");
1469 q = qdisc_lookup(dev, tcm->tcm_handle);
1471 NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1476 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1477 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1481 if (n->nlmsg_type == RTM_DELQDISC) {
1483 NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1486 if (q->handle == 0) {
1487 NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1490 err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1494 qdisc_notify(net, skb, n, clid, NULL, q);
1500 * Create/change qdisc.
1503 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1504 struct netlink_ext_ack *extack)
1506 struct net *net = sock_net(skb->sk);
1508 struct nlattr *tca[TCA_MAX + 1];
1509 struct net_device *dev;
1511 struct Qdisc *q, *p;
1514 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1518 /* Reinit, just in case something touches this. */
1519 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1520 rtm_tca_policy, extack);
1524 tcm = nlmsg_data(n);
1525 clid = tcm->tcm_parent;
1528 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1534 if (clid != TC_H_ROOT) {
1535 if (clid != TC_H_INGRESS) {
1536 p = qdisc_lookup(dev, TC_H_MAJ(clid));
1538 NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1541 q = qdisc_leaf(p, clid);
1542 } else if (dev_ingress_queue_create(dev)) {
1543 q = dev_ingress_queue(dev)->qdisc_sleeping;
1549 /* It may be the default qdisc; ignore it */
1550 if (q && q->handle == 0)
1553 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1554 if (tcm->tcm_handle) {
1555 if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1556 NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1559 if (TC_H_MIN(tcm->tcm_handle)) {
1560 NL_SET_ERR_MSG(extack, "Invalid minor handle");
1563 q = qdisc_lookup(dev, tcm->tcm_handle);
1565 goto create_n_graft;
1566 if (n->nlmsg_flags & NLM_F_EXCL) {
1567 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1570 if (tca[TCA_KIND] &&
1571 nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1572 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1576 (p && check_loop(q, p, 0))) {
1577 NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1580 qdisc_refcount_inc(q);
1584 goto create_n_graft;
1586 /* This magic test requires explanation.
1588 * We know that some child q is already
1589 * attached to this parent and we have a choice:
1590 * either to change it or to create/graft a new one.
1592 * 1. We are allowed to create/graft only
1593 * if the CREATE and REPLACE flags are set.
1595 * 2. If EXCL is set, the requester wanted to say
1596 * that the qdisc tcm_handle is not expected
1597 * to exist, so we choose create/graft too.
1599 * 3. The last case is when no flags are set.
1600 * Alas, this is sort of a hole in the API; we
1601 * cannot decide what to do unambiguously.
1602 * For now we select create/graft if the
1603 * user gave a KIND which does not match the existing one.
1605 if ((n->nlmsg_flags & NLM_F_CREATE) &&
1606 (n->nlmsg_flags & NLM_F_REPLACE) &&
1607 ((n->nlmsg_flags & NLM_F_EXCL) ||
1609 nla_strcmp(tca[TCA_KIND], q->ops->id))))
1610 goto create_n_graft;
1614 if (!tcm->tcm_handle) {
1615 NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1618 q = qdisc_lookup(dev, tcm->tcm_handle);
1621 /* Change qdisc parameters */
1623 NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1626 if (n->nlmsg_flags & NLM_F_EXCL) {
1627 NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1630 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1631 NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1634 err = qdisc_change(q, tca, extack);
1636 qdisc_notify(net, skb, n, clid, NULL, q);
1640 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1641 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1644 if (clid == TC_H_INGRESS) {
1645 if (dev_ingress_queue(dev)) {
1646 q = qdisc_create(dev, dev_ingress_queue(dev), p,
1647 tcm->tcm_parent, tcm->tcm_parent,
1650 NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1654 struct netdev_queue *dev_queue;
1656 if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1657 dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1659 dev_queue = p->dev_queue;
1661 dev_queue = netdev_get_tx_queue(dev, 0);
1663 q = qdisc_create(dev, dev_queue, p,
1664 tcm->tcm_parent, tcm->tcm_handle,
1674 err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1684 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1685 struct netlink_callback *cb,
1686 int *q_idx_p, int s_q_idx, bool recur,
1687 bool dump_invisible)
1689 int ret = 0, q_idx = *q_idx_p;
1697 if (q_idx < s_q_idx) {
1700 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1701 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1702 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1708 /* If dumping singletons, there is no qdisc_dev(root) and the singleton
1709 * itself has already been dumped.
1711 * If we've already dumped the top-level (ingress) qdisc above and the global
1712 * qdisc hashtable, we don't want to hit it again
1714 if (!qdisc_dev(root) || !recur)
1717 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1718 if (q_idx < s_q_idx) {
1722 if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1723 tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1724 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1738 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1740 struct net *net = sock_net(skb->sk);
1743 struct net_device *dev;
1744 const struct nlmsghdr *nlh = cb->nlh;
1745 struct nlattr *tca[TCA_MAX + 1];
1748 s_idx = cb->args[0];
1749 s_q_idx = q_idx = cb->args[1];
1754 err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1755 rtm_tca_policy, cb->extack);
1759 for_each_netdev(net, dev) {
1760 struct netdev_queue *dev_queue;
1768 if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
1769 true, tca[TCA_DUMP_INVISIBLE]) < 0)
1772 dev_queue = dev_ingress_queue(dev);
1774 tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1775 &q_idx, s_q_idx, false,
1776 tca[TCA_DUMP_INVISIBLE]) < 0)
1785 cb->args[1] = q_idx;
1792 /************************************************
1793 * Traffic classes manipulation. *
1794 ************************************************/
1796 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1798 u32 portid, u32 seq, u16 flags, int event)
1801 struct nlmsghdr *nlh;
1802 unsigned char *b = skb_tail_pointer(skb);
1804 const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1807 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1809 goto out_nlmsg_trim;
1810 tcm = nlmsg_data(nlh);
1811 tcm->tcm_family = AF_UNSPEC;
1814 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1815 tcm->tcm_parent = q->handle;
1816 tcm->tcm_handle = q->handle;
1818 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1819 goto nla_put_failure;
1820 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1821 goto nla_put_failure;
1823 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1824 NULL, &d, TCA_PAD) < 0)
1825 goto nla_put_failure;
1827 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1828 goto nla_put_failure;
1830 if (gnet_stats_finish_copy(&d) < 0)
1831 goto nla_put_failure;
1833 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1842 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1843 struct nlmsghdr *n, struct Qdisc *q,
1844 unsigned long cl, int event)
1846 struct sk_buff *skb;
1847 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1850 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1854 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1859 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1860 n->nlmsg_flags & NLM_F_ECHO);
1866 static int tclass_del_notify(struct net *net,
1867 const struct Qdisc_class_ops *cops,
1868 struct sk_buff *oskb, struct nlmsghdr *n,
1869 struct Qdisc *q, unsigned long cl,
1870 struct netlink_ext_ack *extack)
1872 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1873 struct sk_buff *skb;
1879 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1883 if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1884 RTM_DELTCLASS) < 0) {
1889 err = cops->delete(q, cl, extack);
1895 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1896 n->nlmsg_flags & NLM_F_ECHO);
1902 #ifdef CONFIG_NET_CLS
1904 struct tcf_bind_args {
1905 struct tcf_walker w;
1911 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1913 struct tcf_bind_args *a = (void *)arg;
1915 if (tp->ops->bind_class) {
1916 struct Qdisc *q = tcf_block_q(tp->chain->block);
1919 tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1925 struct tc_bind_class_args {
1926 struct qdisc_walker w;
1927 unsigned long new_cl;
1932 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1933 struct qdisc_walker *w)
1935 struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1936 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1937 struct tcf_block *block;
1938 struct tcf_chain *chain;
1940 block = cops->tcf_block(q, cl, NULL);
1943 for (chain = tcf_get_next_chain(block, NULL);
1945 chain = tcf_get_next_chain(block, chain)) {
1946 struct tcf_proto *tp;
1948 for (tp = tcf_get_next_proto(chain, NULL);
1949 tp; tp = tcf_get_next_proto(chain, tp)) {
1950 struct tcf_bind_args arg = {};
1952 arg.w.fn = tcf_node_bind;
1953 arg.classid = a->clid;
1956 tp->ops->walk(tp, &arg.w, true);
1963 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1964 unsigned long new_cl)
1966 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1967 struct tc_bind_class_args args = {};
1969 if (!cops->tcf_block)
1971 args.portid = portid;
1973 args.new_cl = new_cl;
1974 args.w.fn = tc_bind_class_walker;
1975 q->ops->cl_ops->walk(q, &args.w);
1980 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1981 unsigned long new_cl)
1987 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
1988 struct netlink_ext_ack *extack)
1990 struct net *net = sock_net(skb->sk);
1991 struct tcmsg *tcm = nlmsg_data(n);
1992 struct nlattr *tca[TCA_MAX + 1];
1993 struct net_device *dev;
1994 struct Qdisc *q = NULL;
1995 const struct Qdisc_class_ops *cops;
1996 unsigned long cl = 0;
1997 unsigned long new_cl;
2003 if ((n->nlmsg_type != RTM_GETTCLASS) &&
2004 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2007 err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2008 rtm_tca_policy, extack);
2012 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2017 parent == TC_H_UNSPEC - unspecified parent.
2018 parent == TC_H_ROOT - class is root, which has no parent.
2019 parent == X:0 - parent is root class.
2020 parent == X:Y - parent is a node in hierarchy.
2021 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
2023 handle == 0:0 - generate handle from kernel pool.
2024 handle == 0:Y - class is X:Y, where X:0 is qdisc.
2025 handle == X:Y - class is X:Y.
2026 handle == X:0 - root class.
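   For example (illustrative, using tc's hexadecimal notation): a request
   naming class 1:10 carries the classid 0x00010010, so TC_H_MAJ() yields
   0x00010000 (the qdisc 1:) and TC_H_MIN() yields 0x10; a parent given as
   0:10 has an unspecified major, which is completed further below with
   TC_H_MAKE(qid, portid) once the qdisc handle qid is known.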
2029 /* Step 1. Determine qdisc handle X:0 */
2031 portid = tcm->tcm_parent;
2032 clid = tcm->tcm_handle;
2033 qid = TC_H_MAJ(clid);
2035 if (portid != TC_H_ROOT) {
2036 u32 qid1 = TC_H_MAJ(portid);
2039 /* If both majors are known, they must be identical. */
2044 } else if (qid == 0)
2045 qid = dev->qdisc->handle;
2047 /* Now qid is a genuine qdisc handle consistent
2048 * with both parent and child.
2050 * TC_H_MAJ(portid) may still be unspecified; complete it now.
2053 portid = TC_H_MAKE(qid, portid);
2056 qid = dev->qdisc->handle;
2059 /* OK. Locate qdisc */
2060 q = qdisc_lookup(dev, qid);
2064 /* And check that it supports classes */
2065 cops = q->ops->cl_ops;
2069 /* Now try to get class */
2071 if (portid == TC_H_ROOT)
2074 clid = TC_H_MAKE(qid, clid);
2077 cl = cops->find(q, clid);
2081 if (n->nlmsg_type != RTM_NEWTCLASS ||
2082 !(n->nlmsg_flags & NLM_F_CREATE))
2085 switch (n->nlmsg_type) {
2088 if (n->nlmsg_flags & NLM_F_EXCL)
2092 err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2093 /* Unbind filters from the deleted class by binding them to 0 */
2094 tc_bind_tclass(q, portid, clid, 0);
2097 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
2105 if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2106 NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2113 err = cops->change(q, clid, portid, tca, &new_cl, extack);
2115 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
2116 /* We just created a new class; we need to do the reverse binding. */
2118 tc_bind_tclass(q, portid, clid, new_cl);
2124 struct qdisc_dump_args {
2125 struct qdisc_walker w;
2126 struct sk_buff *skb;
2127 struct netlink_callback *cb;
2130 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2131 struct qdisc_walker *arg)
2133 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2135 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2136 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2140 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2141 struct tcmsg *tcm, struct netlink_callback *cb,
2144 struct qdisc_dump_args arg;
2146 if (tc_qdisc_dump_ignore(q, false) ||
2147 *t_p < s_t || !q->ops->cl_ops ||
2149 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2154 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2155 arg.w.fn = qdisc_class_dump;
2159 arg.w.skip = cb->args[1];
2161 q->ops->cl_ops->walk(q, &arg.w);
2162 cb->args[1] = arg.w.count;
2169 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2170 struct tcmsg *tcm, struct netlink_callback *cb,
2171 int *t_p, int s_t, bool recur)
2179 if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2182 if (!qdisc_dev(root) || !recur)
2185 if (tcm->tcm_parent) {
2186 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2187 if (q && q != root &&
2188 tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2192 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2193 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2200 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2202 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2203 struct net *net = sock_net(skb->sk);
2204 struct netdev_queue *dev_queue;
2205 struct net_device *dev;
2208 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2210 dev = dev_get_by_index(net, tcm->tcm_ifindex);
2217 if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
2220 dev_queue = dev_ingress_queue(dev);
2222 tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
2223 &t, s_t, false) < 0)
2233 #ifdef CONFIG_PROC_FS
2234 static int psched_show(struct seq_file *seq, void *v)
2236 seq_printf(seq, "%08x %08x %08x %08x\n",
2237 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2239 (u32)NSEC_PER_SEC / hrtimer_resolution);
2244 static int __net_init psched_net_init(struct net *net)
2246 struct proc_dir_entry *e;
2248 e = proc_create_single("psched", 0, net->proc_net, psched_show);
2255 static void __net_exit psched_net_exit(struct net *net)
2257 remove_proc_entry("psched", net->proc_net);
2260 static int __net_init psched_net_init(struct net *net)
2265 static void __net_exit psched_net_exit(struct net *net)
2270 static struct pernet_operations psched_net_ops = {
2271 .init = psched_net_init,
2272 .exit = psched_net_exit,
2275 static int __init pktsched_init(void)
2279 err = register_pernet_subsys(&psched_net_ops);
2281 pr_err("pktsched_init: "
2282 "cannot initialize per netns operations\n");
2286 register_qdisc(&pfifo_fast_ops);
2287 register_qdisc(&pfifo_qdisc_ops);
2288 register_qdisc(&bfifo_qdisc_ops);
2289 register_qdisc(&pfifo_head_drop_qdisc_ops);
2290 register_qdisc(&mq_qdisc_ops);
2291 register_qdisc(&noqueue_qdisc_ops);
2293 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2294 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2295 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2297 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2298 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2299 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2305 subsys_initcall(pktsched_init);