/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
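
/* Editor's note: a sketch of the underlying math, as implemented by the
 * helpers in <net/red.h>. The average backlog is an EWMA with weight
 * W = 2^(-Wlog):
 *
 *	qavg <- qavg + W * (backlog - qavg)
 *
 * Internally qavg is kept in fixed point with the binary point at Wlog,
 * which is why qth_min/qth_max are shifted down by Wlog when dumped
 * below. The mark/drop probability then grows roughly linearly from 0
 * at qth_min to max_P at qth_max; above qth_max every packet is marked.
 */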
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
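
/* Editor's note: drops decided by RED itself return NET_XMIT_CN, a
 * congestion indication, rather than NET_XMIT_DROP; tail drops happen
 * inside the child FIFO when the hard limit is hit and are accounted
 * as pdrop via the net_xmit_drop_count() branch above.
 */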
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}
static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	unsigned char *stab;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab, max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}
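
/* Editor's note: a typical userspace configuration that reaches this
 * function (values are illustrative only):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptative bandwidth 10Mbit
 *
 * iproute2 derives Wlog, Plog, Scell_log and the STAB idle-time table
 * from these values and ships them here as TCA_RED_PARMS/TCA_RED_STAB.
 */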
static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}
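
/* Editor's note: red_adaptative_algo() (in <net/red.h>) implements
 * "Adaptive RED": every 500 ms max_P is raised or lowered so that the
 * average backlog settles inside a target band between qth_min and
 * qth_max, removing the need to hand-tune the drop probability.
 */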
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
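
/* Editor's note on the xstats fields: "early" counts packets the RED
 * algorithm dropped (probabilistic plus forced), "marked" counts
 * ECN-marked packets, "pdrop" counts tail drops refused by the child
 * FIFO, and "other" is not updated in this version of the code.
 */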
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;
	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}
static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");