/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
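
	Illustrative check (these numbers are ours, not part of the
	original definition): with R = 1000 bytes/sec and B = 2000 bytes,
	two 1500-byte packets served at t_1 = 0 and t_2 = 1 sec satisfy
	s_1 + s_2 = 3000 <= B + R*(t_2 - t_1) = 2000 + 1000*1 = 3000,
	so the flow still (just barely) obeys TBF.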

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
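
	In the implementation below the bucket is, roughly speaking,
	tracked in units of time rather than bytes: q->tokens holds the
	idle time accumulated at rate R (capped at q->buffer), and
	sending a packet of length len costs
	psched_l2t_ns(&q->rate, len) nanoseconds of that budget; see
	tbf_dequeue().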

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other
	one, with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts at a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinity, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)
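
	Worked example (our numbers; we take L to be the backlog limit
	in bytes): with L = 10000, B = 2000, R = 125000 bytes/sec,
	M = 1500 and P = 250000 bytes/sec,
	lat = max(8000/125000, 8500/250000) = max(64ms, 34ms) = 64ms.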

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it
	up when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
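
	(To spell that arithmetic out: 10Mbit/sec is 1.25Mbyte/sec, so
	B >= R/HZ = 1250000/100 = 12500 bytes, i.e. on the order of
	10Kbytes; with a smaller B the shaper cannot sustain the full
	link rate.)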

	Note that the peak rate TBF is much tougher: with MTU 1500 and
	HZ=100, P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)
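
	(Same arithmetic as above, with the peak bucket depth M in place
	of B: P_crit = M*HZ = 1500*100 = 150000 bytes/sec.)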

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
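
/* Example configuration (illustrative only; see tc-tbf(8) for the
 * authoritative syntax and defaults):
 *
 *	tc qdisc add dev eth0 root tbf rate 0.5mbit burst 5kb \
 *		latency 70ms peakrate 1mbit minburst 1540
 *
 * This shapes eth0 to 0.5mbit with a 5KB bucket, bounds queueing
 * latency at roughly 70ms, and limits peaks to 1mbit with a peak
 * bucket of one MTU-sized packet.
 */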
struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;	/* Largest packet passed through unsegmented */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;		/* Peak bucket depth, as time in ns */
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};
/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in a given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		/* Each 53-byte ATM cell carries only 48 bytes of payload */
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
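
/* For instance (illustrative numbers): at r->rate_bytes_ps = 125000
 * (1Mbit/sec) and time_in_ns = 8000000 (8ms), len = 8000000 * 125000
 * / NSEC_PER_SEC = 1000 bytes, before the link-layer and overhead
 * corrections above.
 */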
/* Return length of an individual gso segment, including
 * all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->qstats.backlog -= len;
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
	}
	return len;
}
static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		/* (toks|ptoks) >= 0 iff neither value has the sign bit
		 * set, i.e. both buckets can afford this packet.
		 */
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks),
					   true);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}
static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	err = 0;
done:
	return err;
}
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt);
}
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};
static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};
static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");