// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, the other one,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M.  If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)

	where L is the queue limit in bytes.

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak-rate TBF is much stricter: with MTU 1500,
	P_crit = 150Kbytes/sec.  So, if you need greater peak rates,
	use an Alpha with HZ=1000 :-)
	(A worked numeric example follows this comment block.)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
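
/* Worked example (illustrative numbers only, not taken from this file):
 *
 *   R = 10 Mbit/s = 1,250,000 bytes/s, HZ = 100 (one tick = 10 ms).
 *   Bytes per tick = R/HZ = 12,500, so the bucket depth B must be at
 *   least ~12.5 Kbytes to sustain R across a 1/HZ stall - the
 *   "~10Kbytes" order of magnitude quoted above.
 *
 *   Peak rate: with M = MTU = 1500 bytes, P_crit = M*HZ = 150,000
 *   bytes/s = 150 Kbytes/s.
 *
 *   Latency: with limit L = 50,000 bytes, burst B = 12,500 bytes and no
 *   peak rate, lat = (L-B)/R = 37,500/1,250,000 s = 30 ms.
 */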

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;	/* Largest packet size that fits the bucket: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length: convert time in ns to length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}
	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;
	return len;
}
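
/* For example (illustration only): with rate_bytes_ps = 1,250,000
 * (10 Mbit/s) and time_in_ns = 10,000,000 (10 ms), len = 12,500 bytes
 * before the link-layer and overhead adjustments.
 */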

static void tbf_offload_change(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.rate = q->rate;
	qopt.replace_params.max_size = q->max_size;
	qopt.replace_params.qstats = &sch->qstats;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static int tbf_offload_dump(struct Qdisc *sch)
{
	struct tc_tbf_qopt_offload qopt;

	qopt.command = TC_TBF_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, struct netlink_ext_ack *extack)
{
	struct tc_tbf_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_TBF_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_TBF, &graft_offload, extack);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);
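
	/* Enqueue each segment on the inner qdisc individually.  "nb"
	 * counts the segments that were accepted and "len" accumulates
	 * the length of all segments, so that the parent backlog can be
	 * corrected for the replaced GSO packet afterwards.
	 */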
	nb = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		len += segs->len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
		}
	}
	sch->q.qlen += nb;
	qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;
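
	/* Packets larger than max_size can never accumulate enough tokens
	 * to be sent in one piece.  Oversized GSO packets whose individual
	 * segments do fit are segmented instead of being dropped outright.
	 */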
	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		s64 now, toks, ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);
		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);
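
		/* The packet may be sent only if neither bucket has gone
		 * negative: (toks | ptoks) >= 0 holds exactly when both
		 * signed values are non-negative.
		 */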
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}
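
		/* Not enough tokens: schedule the watchdog for the moment
		 * the larger of the two deficits will have been replenished,
		 * and report an overlimit below.
		 */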
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe a shorter packet in the queue could be sent now.
		   That sounds tempting, but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC).
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST]		= { .type = NLA_U32 },
	[TCA_TBF_PBURST]	= { .type = NLA_U32 },
};
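
/* For orientation (illustrative, based on the tc-tbf(8) interface rather
 * than on code in this file): a command such as
 *
 *   tc qdisc add dev eth0 root tbf rate 1mbit burst 32kbit latency 400ms
 *
 * reaches tbf_change() mainly through TCA_TBF_PARMS, whose rate, buffer
 * and limit fields correspond to the rate, burst and latency arguments
 * above.
 */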

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct Qdisc *old = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
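
	/* The bucket size may be given either directly in bytes via
	 * TCA_TBF_BURST, or implicitly as a time value ("buffer") from
	 * which the byte count is derived at the configured rate.
	 */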
	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);

			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size,
					 psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old = q->qdisc;
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	qdisc_put(old);
	err = 0;

	tbf_offload_change(sch);
done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tbf_offload_destroy(sch);
	qdisc_put(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;
	int err;

	err = tbf_offload_dump(sch);
	if (err)
		return err;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	tbf_offload_graft(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.find		=	tbf_find,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Token Bucket Filter qdisc");