// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size)
	Really, this limit will never be reached
	if RED works correctly.
 */

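/*
 * Illustrative configuration (an iproute2 command sketch, not part of
 * this file; the device name and parameter values are assumptions for
 * a ~10Mbit link):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn adaptive
 *
 * tc derives Wlog, Plog, Scell_log and the RED_STAB_SIZE lookup table
 * from these values and passes them down in TCA_RED_PARMS/TCA_RED_STAB,
 * which red_change() below validates via red_check_params().
 */
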
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

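/*
 * Enqueue: refresh the average queue size from the child backlog (a
 * fixed-point EWMA, avg += (backlog - avg) * 2^-Wlog, computed by
 * red_calc_qavg()), then let red_action() decide between passing the
 * packet through, marking/dropping it probabilistically, or
 * force-marking/dropping it.
 */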
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;
	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;
	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

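/*
 * Dequeue from the child qdisc; when the child runs empty, record the
 * start of an idle period so the average decays while the link is idle
 * (the 990814 qave-on-idle fix noted in the header).
 */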
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

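/*
 * Install or remove a hardware RED instance via ndo_setup_tc(). The
 * thresholds are reported unscaled (shifted back down by Wlog) since
 * q->parms stores them in fixed point.
 */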
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

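/*
 * Parse and apply a new configuration. A non-zero limit gets a bfifo
 * child sized to "limit" bytes; the old child, if any, is flushed under
 * the tree lock but released only after the lock is dropped.
 */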
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	int err;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;
}

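/*
 * Adaptive RED (flag TC_RED_ADAPTATIVE): re-tune max_P every 500ms
 * under the root qdisc lock so the marking probability tracks the load.
 */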
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

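/*
 * Pull basic/queue stats back from the hardware when the qdisc is
 * offloaded, folding them into sch->bstats and sch->qstats.
 */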
static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

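/*
 * Keep the hardware in sync when userspace grafts a new child qdisc
 * under RED (e.g. replacing the default bfifo).
 */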
static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

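/* RED exposes exactly one pseudo-class (the child), so walk one item. */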
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");