/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on linux by :
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>

#define DEFAULT_CODEL_LIMIT 1000
struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	prefetch(&skb->end); /* we'll need skb_shinfo() */
	return skb;
}
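
/* Rough sketch of how the callback above is used, for orientation only
 * (the real control loop lives in codel_dequeue() in include/net/codel.h
 * and is more involved):
 *
 *	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
 *		- calls dequeue(vars, sch) to pull the head packet,
 *		- compares the packet's enqueue timestamp (its sojourn time)
 *		  against params->target over params->interval,
 *		- drops or ECN-marks packets while the queue stays "bad",
 *		  accumulating stats->drop_count/drop_len for the caller.
 */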
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}
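
/* Note on the deferral above: when codel_dequeue() drained the queue to
 * zero, the accumulated drop_count/drop_len stay in q->stats and are only
 * propagated to ancestor qdiscs via qdisc_tree_reduce_backlog() on a later
 * dequeue that leaves packets queued, avoiding the qlen == 0 case HTB
 * cannot handle.
 */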
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch);
}
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};
static int codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}
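
/* Unit conversion used in codel_change() above: TARGET, INTERVAL and
 * CE_THRESHOLD arrive from userspace in microseconds and are stored in
 * codel time units of 2^CODEL_SHIFT ns.  A worked example, assuming
 * CODEL_SHIFT is 10 as defined in include/net/codel.h: a 5 ms target
 * arrives as 5000 us, so
 *
 *	(5000 * NSEC_PER_USEC) >> 10 = 5000000 >> 10 = 4882
 *
 * codel time units of ~1.024 us each.
 */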
static int codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;
	codel_params_init(&q->params, sch);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);

	if (opt) {
		int err = codel_change(sch, opt);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(q->params.target)) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(q->params.interval)) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			q->params.ecn))
		goto nla_put_failure;
	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->params.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
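
/* These xstats are what userspace displays: `tc -s qdisc show dev eth0`
 * prints count, lastcount, ldelay, drop_next, maxpacket, ecn_mark, ce_mark
 * and drop_overlimit for a codel qdisc (exact formatting depends on the
 * iproute2 version).
 */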
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}
static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
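
/* Typical userspace usage via iproute2 (shown for reference; see
 * tc-codel(8)):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 *
 * limit, target, interval and ecn map onto the TCA_CODEL_* attributes
 * handled by codel_change() above; `tc -s qdisc show dev eth0` reads back
 * codel_dump() and codel_dump_stats().
 */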
static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)
MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");