// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------
	Algorithm skeleton was taken from NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	they should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement in [3] is
	incomplete: delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	where B is the link bandwidth. In the worst case we have an
	IntServ estimate with D = W*r + k*MTU and C = MTU*r.
	The proof (if correct at all) is trivial.

	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like incorrect translations
	from NS. Anyone is welcome to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet has finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator,
	this scheme looks very close to an ideal solution.  */
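/*
 * Worked instance of the delay bound above (illustrative only; the
 * numbers are hypothetical, not taken from this file): MTU = 1500 B,
 * B = 125000 B/s, W = 1000 B, k = 2 classes with r_1 = r_2 = 0.25, so
 * r = 0.5.  The per-round allotment W*r_1 = 250 B is well below MTU,
 * yet the bound still yields a finite delay estimate.
 */
#if 0	/* sketch only, not built; needs <math.h> for ceil() */
static double cbq_delay_bound(double mtu, double bps, double w,
			      double r_i, double r, int k)
{
	/* delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B */
	double rounds = ceil(mtu / (w * r_i));

	return (rounds * w * r + w * r + k * mtu) / bps;
}
#endif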
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;

	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */

/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;		/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
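/*
 * L2T ("length to time") converts a packet length into transmission
 * time on the class's configured rate, via the table that userspace
 * computed and passed down in TCA_CBQ_RTAB.  A simplified sketch of
 * the idea (an assumption-level illustration; the real lookup is
 * qdisc_l2t(), which also accounts for cell alignment, overhead and
 * oversized packets):
 */
#if 0	/* sketch only, not built */
struct rtab_sketch {
	unsigned int cell_log;		/* log2(bytes per table cell) */
	unsigned int data[256];		/* ticks per cell-sized step */
};

static unsigned int l2t_sketch(const struct rtab_sketch *t, unsigned int len)
{
	unsigned int slot = len >> t->cell_log;

	return t->data[slot < 256 ? slot : 255];
}
#endif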
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}
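/*
 * Class ids are 32-bit <major:minor> handles: the major half names the
 * qdisc instance, the minor half the class within it.  cbq_classify()
 * below leans on this, e.g. TC_H_MAJ(prio ^ sch->handle) == 0 asks
 * "does skb->priority carry our major number?".  Sketch of the split
 * (mirrors the TC_H_* macros; the _SKETCH names are illustrative):
 */
#if 0	/* sketch only, not built */
#define H_MAJ_SKETCH(h)		((h) & 0xFFFF0000U)
#define H_MIN_SKETCH(h)		((h) & 0x0000FFFFU)
#define H_MAKE_SKETCH(maj, min)	(H_MAJ_SKETCH(maj) | H_MIN_SKETCH(min))
#endif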
#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}

	return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route-based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;

		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tcf_classify(skb, NULL, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link-sharing class,
		 *	     apply the agency-specific classifier.
		 *	     Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}
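/*
 * The active classes of one priority band form a circular, singly
 * linked list threaded through ->next_alive, with q->active[prio]
 * pointing at the TAIL.  Appending behind the tail, as above, keeps
 * strict round-robin order.  Generic sketch of the same idiom:
 */
#if 0	/* sketch only, not built */
struct ring {
	struct ring *next;
};

/* Append @n behind tail @tail; returns the new tail. */
static struct ring *ring_append(struct ring *tail, struct ring *n)
{
	if (!tail) {
		n->next = n;		/* one-element ring */
		return n;
	}
	n->next = tail->next;		/* new node points at the head */
	tail->next = n;
	return n;			/* the new node becomes the tail */
}
#endif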
/*
 * Unlink class from the active chain.
 * Note that the same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int ret;
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work off avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here: apparently they
		 * forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on the
	 * real available rate, rather than the leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
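/*
 * The penalty above answers: how long must the class sleep so that,
 * with W = 2^-ewma_log and cl->avgidle kept pre-scaled by 1/W, the
 * idle average climbs back to zero?  Plain-C sketch of the arithmetic
 * (illustrative parameter names, not the kernel types):
 */
#if 0	/* sketch only, not built */
static long overlimit_delay_sketch(long undertime, long now, long offtime,
				   long avgidle, unsigned int ewma_log)
{
	long delay = undertime - now + offtime;

	if (avgidle < 0)
		/* forgive the accumulated negative idle, minus its EWMA share */
		delay -= (-avgidle) - ((-avgidle) >> ewma_log);
	return delay > 0 ? delay : 1;
}
#endif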
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = 0;
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}
/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding the expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		_bstats_update(&cl->bstats, len, 1);

		/*
		 * (now - last) is the total time between packet right edges.
		 * (last_pktlen/rate) is the "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W = 2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already been accounted
			 * for by the moment cbq_update is called.)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
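/*
 * The scaled update used above, avgidle += idle - (avgidle >> ewma_log),
 * is exactly the EWMA  true_avgidle := (1-W)*true_avgidle + W*idle  with
 * W = 2^-ewma_log, once both sides are divided by W (cl->avgidle stores
 * true_avgidle/W).  Sketch:
 */
#if 0	/* sketch only, not built */
static long ewma_scaled_update(long avgidle, long idle, unsigned int ewma_log)
{
	/* true_avgidle/W := (1-W)*true_avgidle/W + idle */
	return avgidle + idle - (avgidle >> ewma_log);
}
#endif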
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Currently an overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though this agrees with the ancestor-only paradigm,
		 * it looks very unfortunate. In particular,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and yet
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment for
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from the active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair the tail pointer */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
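/*
 * Within one band the loop above is a weighted round robin with
 * deficits: a class may transmit while its deficit is positive, pays
 * qdisc_pkt_len(skb) per packet, and earns +quantum whenever it runs
 * dry, so its long-run share of the band is proportional to quantum.
 * Stripped-down sketch of the per-packet accounting:
 */
#if 0	/* sketch only, not built */
static int wrr_may_send(long *deficit, long quantum, unsigned int pktlen)
{
	if (*deficit <= 0) {
		*deficit += quantum;	/* open a new round ... */
		return 0;		/* ... and yield to the next class */
	}
	*deficit -= pktlen;		/* send and pay for this packet */
	return 1;
}
#endif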
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);

		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
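/*
 * q->activemask keeps one bit per priority band with backlog, and
 * ffz(~mask) above picks the lowest set bit, i.e. the most urgent band.
 * Portable equivalent of that bit scan:
 */
#if 0	/* sketch only, not built */
static int lowest_set_bit(unsigned int mask)
{
	int bit = 0;

	if (!mask)
		return -1;		/* no active band */
	while (!(mask & 1)) {
		mask >>= 1;
		bit++;
	}
	return bit;
}
#endif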
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 * 1. The scheduler is empty.
		 * 2. The toplevel cutoff inhibited borrowing.
		 * 3. The root class is overlimit.
		 *
		 * Reset conditions 2 and 3 and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy here: peeking
		 * at an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in the scheduler, or nobody wants to give them to us :-(
	 * Sigh... start the watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		unsigned int level = 0;
		struct cbq_class *cl;

		if ((cl = this->children) != NULL) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflow!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
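/*
 * The normalization above sizes each quantum as
 *	weight * allot * nclasses[prio] / quanta[prio]
 * where quanta[prio] is the sum of the weights in the band, so the
 * band's quanta stay centred on allot.  E.g. (hypothetical numbers)
 * three classes with weights 10/20/30 and allot 1500 get quanta
 * 750/1500/2250.  Sketch using a 64-bit intermediate to dodge the
 * overflow the comment above warns about:
 */
#if 0	/* sketch only, not built */
static long normalized_quantum(long weight, long allot, int nclasses,
			       unsigned long weight_sum)
{
	return (long)((long long)weight * allot * nclasses / weight_sum);
}
#endif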
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}
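/*
 * cl->defmap is a bitmask over the TC_PRIO_* values: bit i set means
 * "under my split node, I claim packets of logical priority i".  The
 * loops above rebuild split->defaults[] so that each slot points at
 * the lowest-level claiming class.  Bit-test sketch:
 */
#if 0	/* sketch only, not built */
static int claims_priority(unsigned int defmap, int prio)
{
	return (defmap >> prio) & 1;
}
#endif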
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}
static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}
static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}
static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
}
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
			 struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
					  cbq_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_WRROPT]) {
		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);

		if (wrr->priority > TC_CBQ_MAXPRIO) {
			NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
			err = -EINVAL;
		}
	}
	return err;
}
static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;

	err = cbq_opt_parse(tb, opt, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
		return -EINVAL;
	}

	r = nla_data(tb[TCA_CBQ_RATE]);

	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
	if (!q->link.R_tab)
		return -EINVAL;

	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
	if (err)
		goto put_rtab;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_block;

	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle, NULL);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_block:
	tcf_block_put(q->link.block);

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	__u32 qlen;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;
	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, extack);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cbq_deactivate_class(cl);
}

static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	return (unsigned long)cbq_class_lookup(q, classid);
}
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_block_put(cl->block);
	qdisc_put(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}
static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	err = cbq_opt_parse(tb, opt, extack);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
		return -EOPNOTSUPP;
	}

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid) {
				NL_SET_ERR_MSG(extack, "Invalid parent id");
				return -EINVAL;
			}
			if (!cl->tparent && parentid != TC_H_ROOT) {
				NL_SET_ERR_MSG(extack, "Parent must be root");
				return -EINVAL;
			}
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB], extack);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL, true,
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
		NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
		return -EINVAL;
	}

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
			      extack);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid)) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			goto failure;
		}
	} else {
		int i;

		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000) {
			NL_SET_ERR_MSG(extack, "Unable to generate classid");
			goto failure;
		}
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (!parent) {
			NL_SET_ERR_MSG(extack, "Failed to find parentid");
			goto failure;
		}
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	gnet_stats_basic_sync_init(&cl->bstats);
	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		goto failure;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL, true, tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
			tcf_block_put(cl->block);
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				  NULL);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg,
		      struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->q);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	cbq_destroy_class(sch, cl);
	return 0;
}
static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return cl->block;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.find		=	cbq_find,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_block	=	cbq_tcf_block,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}

static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}

module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");