/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		     Management Models for Packet Networks",
		     IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		     Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		     for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	Algorithm skeleton was taken from NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	they should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real-time classes
	have small rates. Note that the statement in [3] is
	incomplete: delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if it is correct at all) is trivial.

	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places that look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to treat the next dequeue event
	as a sign that the previous packet has finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it holds. Moreover, combined with the clock integrator, this
	scheme looks very close to an ideal solution. */
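/* Illustrative sketch (not part of the original file): a helper evaluating
 * the worst-case delay bound quoted above. The function name, parameters
 * and units are hypothetical; nothing in the scheduler calls it, it only
 * makes the formula concrete. Inputs: per-round class allotment W*r_i and
 * aggregate allotment W*r in bytes (r = r_1+...+r_k < 1), k classes, link
 * bandwidth B in bytes/sec; [x] denotes the ceiling.
 */
static inline unsigned long __maybe_unused
cbq_example_delay_bound(unsigned long mtu, unsigned long w_ri,
			unsigned long w_r, unsigned long k,
			unsigned long bytes_per_sec)
{
	/* [MTU/(W*r_i)] full rounds may pass before one MTU of credit */
	unsigned long rounds = DIV_ROUND_UP(mtu, w_ri);

	/* delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B, in seconds */
	return (rounds * w_r + w_r + k * mtu) / bytes_per_sec;
}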
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;			/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit yet */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
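/* Usage note (illustrative, not from the original file): L2T maps a packet
 * length to its transmission time at the class rate, in psched ticks, e.g.
 *
 *	psched_tdiff_t t = L2T(cl, qdisc_pkt_len(skb));
 *
 * cbq_update() below relies on this to convert lengths into idle time.
 */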
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}
#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;

		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tc_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 *	     apply agency specific classifier.
		 *	     Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}
/*
 * Unlink class from the active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
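/* Illustrative note (not from the original file): q->active[prio] points at
 * the TAIL of a circular singly linked list threaded through next_alive, so
 * tail->next_alive is the head where the WRR round starts. With classes
 * A -> B -> C and C the tail, deactivating B simply sets A->next_alive = C;
 * deactivating the only remaining class clears the band's bit in
 * q->activemask, as cbq_deactivate_class() above does.
 */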
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * The class goes to sleep, so that it will have no
		 * chance to work off avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here, apparently
		 * they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}
/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and it is not regulated. It is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding the expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle) or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
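			/* Worked example (illustrative): with ewma_log = 5
			 * (W = 1/32) and scaled avgidle = -64, the class
			 * needs 64 - (64 >> 5) = 62 ticks of idle time
			 * before true_avgidle climbs back to zero.
			 */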
			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for non-bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. In particular,
		 * it means that this chunk of code will either
		 * never be called or will result in strong amplification
		 * of burstiness. Dangerous, silly, and yet
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);

		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy here: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */
		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the latter case.
	 */
	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflow!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
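/* Worked example (illustrative, not from the original file): two classes at
 * the same priority, both with allot 1514, weights 3 and 1, give
 * q->nclasses[prio] = 2 and q->quanta[prio] = 4, so the heavier class is
 * assigned quantum = 3*1514*2/4 = 2271 bytes per WRR round and the lighter
 * one 1*1514*2/4 = 757, which the sanity check above would then accept.
 */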
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}
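/* Example (illustrative, not from the original file): bit i of cl->defmap
 * means cl volunteers to be the default for packets classified to logical
 * priority i. E.g. def = mask = (1<<TC_PRIO_BESTEFFORT)|(1<<TC_PRIO_BULK)
 * offers cl for best-effort and bulk traffic; cbq_sync_defmap() then elects,
 * for every slot, the candidate with the lowest level under the split node.
 */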
static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}
static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}
static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}
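/* Usage note (illustrative, not from the original file): with the flags
 * above, TCF_CBQ_LSS_ISOLATED clears cl->share, so the class does not lend
 * spare bandwidth through the link-sharing chain, while TCF_CBQ_LSS_BOUNDED
 * clears cl->borrow, so it can never borrow from its parent; a bounded,
 * isolated class is therefore hard-limited to its configured rate.
 */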
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt)
{
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_WRROPT]) {
		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);

		if (wrr->priority > TC_CBQ_MAXPRIO)
			err = -EINVAL;
	}
	return err;
}
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = cbq_opt_parse(tb, opt);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}
static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves, which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	err = cbq_opt_parse(tb, opt);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
		return -EOPNOTSUPP;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;

		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen, backlog;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	backlog = cl->q->qstats.backlog;
	qdisc_reset(cl->q);
	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}
static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");