// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
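
	F.e. with R = 1000 bytes/sec and B = 4000 bytes, three
	back-to-back 1500 byte packets sum to 4500 bytes, so the third
	one may not be served before t_3 - t_1 = (4500-4000)/1000 = 0.5 sec.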

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
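
	F.e. with B/R = 0.1 sec and a full bucket (N = 0.1 sec), a
	packet with S/R = 0.06 sec departs at once and leaves
	N = 0.04 sec; an identical packet right behind it must wait
	0.02 sec for N to grow back to 0.06 sec.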

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady state burst size, while another
	one with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
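
	F.e. for a backlog L = 30Kbytes with B = 10Kbytes,
	R = 1Mbyte/sec, M = 1.5Kbytes and P = 10Mbyte/sec:

	lat = max ((30K-10K)/1M, (30K-1.5K)/10M) = max (20ms, 2.85ms) = 20ms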

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
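
	(Check: R = 10Mbit/sec = 1.25Mbyte/sec, so B must be at least
	R/HZ = 1.25Mbyte/100 = 12.5Kbytes, i.e. on the order of the
	~10Kbytes quoted above.)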

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
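
/* F.e., a sketch of a typical userspace setup (tc-tbf syntax; device
 * name and numbers are illustrative only):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 70ms
 *
 * tc translates latency into the byte limit handed down as qopt->limit,
 * while rate and burst become q->rate and q->buffer/q->max_size below.
 */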
struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;	/* Largest packet admitted without segmenting */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;		/* Peak-rate bucket depth, in ns */
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		/* Each 53 byte ATM cell carries only 48 bytes of payload */
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
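
/* F.e. at r->rate_bytes_ps = 125000000 (1Gbit/sec), assuming zero
 * overhead, time_in_ns = 1000000 (1ms) yields
 * 125000000 * 1000000 / NSEC_PER_SEC = 125000 bytes; on
 * TC_LINKLAYER_ATM the result shrinks further by the 48/53 cell
 * payload ratio.
 */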

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
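
/* F.e. a 45000 byte GSO skb split into 30 segments of 1500 bytes was
 * charged to the hierarchy once as a single packet of prev_len = 45000
 * bytes; after segmenting, the tree accounting above is corrected by
 * 1 - nb = -29 packets and prev_len - len = 0 bytes.
 */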

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		/* Send only if both the rate bucket and, when configured,
		 * the peak bucket end up non-negative.
		 */
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));
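
		/* F.e. at rate 125000 bytes/sec a 1250 byte packet costs
		 * 10ms worth of tokens, so if toks came out as -4ms the
		 * watchdog fires 4ms (in ns units) past now.
		 */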

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST] = { .type = NLA_U32 },
	[TCA_TBF_PBURST] = { .type = NLA_U32 },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct Qdisc *old = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	/* Legacy userspace may leave linklayer unset; derive it from the
	 * provided rate table.
	 */
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}
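
	/* F.e. with rate 125000 bytes/sec, a 100ms buffer and no
	 * TCA_TBF_BURST attribute, max_size becomes
	 * psched_ns_t2l(&rate, 100000000) = 12500 bytes.
	 */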

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old = q->qdisc;
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	qdisc_put(old);
	err = 0;

done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_put(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");