/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio(WRED)
 *                     - A finer grained VQ dequeue based on suggestion
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

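/*
 * GRED multiplexes up to MAX_DPs RED virtual queues (VQs) inside one
 * qdisc; the low bits of skb->tc_index select the VQ ("DP"), which is
 * why GRED_VQ_MASK relies on MAX_DPs being a power of two.  An
 * illustrative (not authoritative) tc session looks roughly like:
 *
 *   tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *   tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB \
 *      min 15KB max 45KB burst 20 avpkt 1000 bandwidth 10Mbit \
 *      probability 0.02
 */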

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop precedence (VQ index) */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

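/*
 * Mode flag helpers.  RIO mode is requested by the user (sopt->grio)
 * and makes a VQ's effective average include the averages of all
 * lower-numbered (more preferred) priorities; WRED mode is derived,
 * enabled only when RIO mode is on and at least two VQs share a
 * priority, in which case every VQ also shares one averaging state
 * (wred_set above).
 */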
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed very often. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

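/*
 * In WRED mode all VQs share a single average, so the backlog feeding
 * the EWMA is the whole qdisc's backlog rather than the per-VQ one.
 */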
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

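/*
 * The load/store pair above brackets every red_calc_qavg() call in
 * WRED mode: the shared average is copied into the per-VQ red_vars,
 * updated there, and written back, so all VQs observe one EWMA.
 */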
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

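/*
 * Enqueue path: map the skb to its VQ via tc_index (falling back to
 * the default DP), update the RED average, and let red_action()
 * choose between queueing, ECN-marking, and dropping.
 */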
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

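	/* qavg now carries the summed averages of all more-preferred
	 * (numerically lower) priorities; adding it to this VQ's own
	 * average in red_action() below makes less-preferred VQs drop
	 * earlier when the preferred traffic builds up.
	 */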
	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

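/*
 * Note the two drop exits above: the plain "drop" path reports
 * NET_XMIT_DROP (queue overflow) via qdisc_drop()'s return value,
 * while "congestion_drop" reports NET_XMIT_CN so callers can tell a
 * RED congestion drop from a full queue.
 */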
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
	    sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found, so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

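/*
 * Per-VQ (re)configuration.  The netlink interface is two-step: a
 * TCA_GRED_DPS message (handled above) sizes the table and picks the
 * default DP, after which each VQ is configured individually with a
 * TCA_GRED_PARMS/TCA_GRED_STAB pair (handled below and in
 * gred_change()).
 */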
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

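/*
 * The *prealloc dance exists because gred_change() must allocate with
 * GFP_KERNEL (which may sleep) before taking sch_tree_lock(); the
 * buffer is consumed above only when the VQ does not exist yet, and
 * any unused preallocation is freed by the caller after unlocking.
 */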
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
};

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

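/*
 * gred_init() below only accepts the table-definition attributes; if
 * no explicit TCA_GRED_LIMIT is given, the hard limit defaults to
 * tx_queue_len packets of one MTU each.
 */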
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");