// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* COMMON Applications Kept Enhanced (CAKE) discipline
 *
 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
 *
 * The CAKE Principles:
 *		   (or, how to have your cake and eat it too)
 *
 * This is a combination of several shaping, AQM and FQ techniques into one
 * easy-to-use package:
 *
 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
 *   equipment and bloated MACs.  This operates in deficit mode (as in sch_fq),
 *   eliminating the need for any sort of burst parameter (eg. token bucket
 *   depth).  Burst support is limited to that necessary to overcome scheduling
 *   latency.
 *
 * - A Diffserv-aware priority queue, giving more priority to certain classes,
 *   up to a specified fraction of bandwidth.  Above that bandwidth threshold,
 *   the priority is reduced to avoid starving other tins.
 *
 * - Each priority tin has a separate Flow Queue system, to isolate traffic
 *   flows from each other.  This prevents a burst on one flow from increasing
 *   the delay to another.  Flows are distributed to queues using a
 *   set-associative hash function.
 *
 * - Each queue is actively managed by Cobalt, which is a combination of the
 *   Codel and Blue AQM algorithms.  This serves flows fairly, and signals
 *   congestion early via ECN (if available) and/or packet drops, to keep
 *   latency low.  The codel parameters are auto-tuned based on the bandwidth
 *   setting, as is necessary at low bandwidths.
 *
 * The configuration parameters are kept deliberately simple for ease of use.
 * Everything has sane defaults.  Complete generality of configuration is *not*
 * a goal.
 *
 * The priority queue operates according to a weighted DRR scheme, combined
 * with a bandwidth tracker which reuses the shaper logic to detect which
 * side of the bandwidth sharing threshold the tin is operating on.  This
 * determines whether a priority-based weight (high) or a bandwidth-based
 * weight (low) is used for that tin in the current pass.
 *
 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
 * granted us permission to leverage.
 */
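/* Illustrative configuration (an editorial example assuming the standard
 * tc(8) cake options; not part of the original source): shape to just
 * under the link rate so that this qdisc, not the modem, is the
 * bottleneck:
 *
 *	tc qdisc replace dev eth0 root cake bandwidth 19500kbit diffserv4 nat
 */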
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
#include <linux/if_vlan.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif
#define CAKE_SET_WAYS (8)
#define CAKE_MAX_TINS (8)
#define CAKE_QUEUES (1024)
#define CAKE_FLOW_MASK 63
#define CAKE_FLOW_NAT_FLAG 64
/* struct cobalt_params - contains codel and blue parameters
 * @interval:	codel initial drop rate
 * @target:	maximum persistent sojourn time & blue update rate
 * @mtu_time:	serialisation delay of maximum-size packet
 * @p_inc:	increment of blue drop probability (0.32 fxp)
 * @p_dec:	decrement of blue drop probability (0.32 fxp)
 */
struct cobalt_params {
	u64	interval;
	u64	target;
	u64	mtu_time;
	u32	p_inc;
	u32	p_dec;
};
/* struct cobalt_vars - contains codel and blue variables
 * @count:		codel dropping frequency
 * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
 * @drop_next:		time to drop next packet, or when we dropped last
 * @blue_timer:		Blue time to next drop
 * @p_drop:		BLUE drop probability (0.32 fxp)
 * @dropping:		set if in dropping state
 * @ecn_marked:		set if marked
 */
struct cobalt_vars {
	u32	count;
	u32	rec_inv_sqrt;
	ktime_t	drop_next;
	ktime_t	blue_timer;
	u32	p_drop;
	bool	dropping;
	bool	ecn_marked;
};

enum {
	CAKE_SET_NONE = 0,
	CAKE_SET_SPARSE,
	CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
	CAKE_SET_BULK,
	CAKE_SET_DECAYING
};
struct cake_flow {
	/* this stuff is all needed per-flow at dequeue time */
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	s32		  deficit;
	u32		  dropped;
	struct cobalt_vars cvars;
	u16		  srchost; /* index into cake_host table */
	u16		  dsthost;
	u8		  set;
}; /* please try to keep this structure <= 64 bytes */
struct cake_host {
	u32 srchost_tag;
	u32 dsthost_tag;
	u16 srchost_bulk_flow_count;
	u16 dsthost_bulk_flow_count;
};

struct cake_heap_entry {
	u16 t:3, b:10;
};
struct cake_tin_data {
	struct cake_flow flows[CAKE_QUEUES];
	u32	backlogs[CAKE_QUEUES];
	u32	tags[CAKE_QUEUES]; /* for set association */
	u16	overflow_idx[CAKE_QUEUES];
	struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
	u16	flow_quantum;

	struct cobalt_params cparams;
	u32	drop_overlimit;
	u16	bulk_flow_count;
	u16	sparse_flow_count;
	u16	decaying_flow_count;
	u16	unresponsive_flow_count;

	u32	max_skblen;

	struct list_head new_flows;
	struct list_head old_flows;
	struct list_head decaying_flows;

	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	ktime_t	time_next_packet;
	u64	tin_rate_ns;
	u64	tin_rate_bps;
	u16	tin_rate_shft;

	u16	tin_quantum_prio;
	u16	tin_quantum_band;
	s32	tin_deficit;
	u32	tin_backlog;
	u32	tin_dropped;
	u32	tin_ecn_mark;

	u32	packets;
	u64	bytes;

	u32	ack_drops;

	/* moving averages */
	u64	avge_delay;
	u64	peak_delay;
	u64	base_delay;

	/* hash function stats */
	u32	way_directs;
	u32	way_hits;
	u32	way_misses;
	u32	way_collisions;
}; /* number of tins is small, so size of this struct doesn't matter much */
struct cake_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct cake_tin_data *tins;

	struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
	u16		overflow_timeout;

	u16		tin_cnt;
	u8		tin_mode;
	u8		flow_mode;
	u8		ack_filter;
	u8		atm_mode;

	u32		fwmark_mask;
	u16		fwmark_shft;

	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	u16		rate_shft;
	ktime_t		time_next_packet;
	ktime_t		failsafe_next_packet;
	u64		rate_ns;
	u64		rate_bps;
	u16		rate_flags;
	s16		rate_overhead;
	u16		rate_mpu;
	u64		interval;
	u64		target;

	/* resource tracking */
	u32		buffer_used;
	u32		buffer_max_used;
	u32		buffer_limit;
	u32		buffer_config_limit;

	/* indices for dequeue */
	u16		cur_tin;
	u16		cur_flow;

	struct qdisc_watchdog watchdog;
	const u8	*tin_index;
	const u8	*tin_order;

	/* bandwidth capacity estimate */
	ktime_t		last_packet_time;
	ktime_t		avg_window_begin;
	u64		avg_packet_interval;
	u64		avg_window_bytes;
	u64		avg_peak_bandwidth;
	ktime_t		last_reconfig_time;

	/* packet length stats */
	u32		avg_netoff;
	u16		max_netlen;
	u16		max_adjlen;
	u16		min_netlen;
	u16		min_adjlen;
};

enum {
	CAKE_FLAG_OVERHEAD	   = BIT(0),
	CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
	CAKE_FLAG_INGRESS	   = BIT(2),
	CAKE_FLAG_WASH		   = BIT(3),
	CAKE_FLAG_SPLIT_GSO	   = BIT(4)
};
/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
 * obtain the best features of each. Codel is excellent on flows which
 * respond to congestion signals in a TCP-like way. BLUE is more effective on
 * unresponsive flows.
 */

struct cobalt_skb_cb {
	ktime_t enqueue_time;
	u32     adjusted_len;
};

static u64 us_to_ns(u64 us)
{
	return us * NSEC_PER_USEC;
}

static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
	return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
}

static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
{
	return get_cobalt_cb(skb)->enqueue_time;
}

static void cobalt_set_enqueue_time(struct sk_buff *skb,
				    ktime_t now)
{
	get_cobalt_cb(skb)->enqueue_time = now;
}
static u16 quantum_div[CAKE_QUEUES + 1] = {0};
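/* Reciprocal table, filled at init time with quantum_div[i] == 65535 / i
 * (an assumption based on its use below): it lets the per-host fair
 * quantum be computed as (flow_quantum * quantum_div[host_load]) >> 16,
 * a multiply and shift instead of a divide on the fast path.
 */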
/* Diffserv lookup tables */

static const u8 precedence[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};

static const u8 diffserv8[] = {
	2, 5, 1, 2, 4, 2, 2, 2,
	0, 2, 1, 2, 1, 2, 1, 2,
	5, 2, 4, 2, 4, 2, 4, 2,
	3, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 3, 2, 3, 2, 3, 2,
	6, 2, 2, 2, 6, 2, 6, 2,
	7, 2, 2, 2, 2, 2, 2, 2,
	7, 2, 2, 2, 2, 2, 2, 2,
};

static const u8 diffserv4[] = {
	0, 2, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
	2, 0, 2, 0, 2, 0, 2, 0,
	3, 0, 2, 0, 2, 0, 2, 0,
	3, 0, 0, 0, 3, 0, 3, 0,
	3, 0, 0, 0, 0, 0, 0, 0,
	3, 0, 0, 0, 0, 0, 0, 0,
};

static const u8 diffserv3[] = {
	0, 0, 0, 0, 2, 0, 0, 0,
	1, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 2, 0, 2, 0,
	2, 0, 0, 0, 0, 0, 0, 0,
	2, 0, 0, 0, 0, 0, 0, 0,
};

static const u8 besteffort[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
};

/* tin priority order for stats dumping */

static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};
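/* How to read the tables above (illustrative): the index is the DSCP
 * value 0..63 laid out eight per row, and the entry is the tin number.
 * E.g. in diffserv4, EF (DSCP 46) yields diffserv4[46] == 3 (the Voice
 * tin), while CS1 (DSCP 8) yields diffserv4[8] == 1 (the Bulk tin).
 */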
#define REC_INV_SQRT_CACHE (16)
static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};

/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 */

static void cobalt_newton_step(struct cobalt_vars *vars)
{
	u32 invsqrt, invsqrt2;
	u64 val;

	invsqrt = vars->rec_inv_sqrt;
	invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
	val = (3LL << 32) - ((u64)vars->count * invsqrt2);

	val >>= 2; /* avoid overflow in following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);

	vars->rec_inv_sqrt = val;
}
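/* Worked example (illustrative): with count == 4 and
 * rec_inv_sqrt == 0x80000000 (0.5 in Q0.32), invsqrt2 == 0x40000000 and
 * val == (3LL << 32) - (4 * 0x40000000) == 2LL << 32.  After the two
 * shifts, val is 0x80000000 again: 1/sqrt(4) == 0.5 is a fixed point of
 * the iteration, as expected.
 */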
static void cobalt_invsqrt(struct cobalt_vars *vars)
{
	if (vars->count < REC_INV_SQRT_CACHE)
		vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
	else
		cobalt_newton_step(vars);
}
/* There is a big difference in timing between the accurate values placed in
 * the cache and the approximations given by a single Newton step for small
 * count values, particularly when stepping from count 1 to 2 or vice versa.
 * Above 16, a single Newton step gives sufficient accuracy in either
 * direction, given the precision stored.
 *
 * The magnitude of the error when stepping up to count 2 is such as to give
 * the value that *should* have been produced at count 4.
 */

static void cobalt_cache_init(void)
{
	struct cobalt_vars v;

	memset(&v, 0, sizeof(v));
	v.rec_inv_sqrt = ~0U;
	cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;

	for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
		cobalt_newton_step(&v);
		cobalt_newton_step(&v);
		cobalt_newton_step(&v);
		cobalt_newton_step(&v);

		cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
	}
}
static void cobalt_vars_init(struct cobalt_vars *vars)
{
	memset(vars, 0, sizeof(*vars));

	if (!cobalt_rec_inv_sqrt_cache[0]) {
		cobalt_cache_init();
		cobalt_rec_inv_sqrt_cache[0] = ~0;
	}
}
/* CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operation.
 */
static ktime_t cobalt_control(ktime_t t,
			      u64 interval,
			      u32 rec_inv_sqrt)
{
	return ktime_add_ns(t, reciprocal_scale(interval,
						rec_inv_sqrt));
}
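/* For example (illustrative): with interval == 100 ms and count == 4,
 * rec_inv_sqrt is ~0x80000000 (0.5 in Q0.32), and reciprocal_scale()
 * computes (interval * rec_inv_sqrt) >> 32 == ~50 ms: the signalling
 * interval shrinks as interval/sqrt(count).
 */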
/* Call this when a packet had to be dropped due to queue overflow.  Returns
 * true if the BLUE state was quiescent before but active after this call.
 */
static bool cobalt_queue_full(struct cobalt_vars *vars,
			      struct cobalt_params *p,
			      ktime_t now)
{
	bool up = false;

	if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
		up = !vars->p_drop;
		vars->p_drop += p->p_inc;
		if (vars->p_drop < p->p_inc)
			vars->p_drop = ~0;
		vars->blue_timer = now;
	}
	vars->dropping = true;
	vars->drop_next = now;
	if (!vars->count)
		vars->count = 1;

	return up;
}
/* Call this when the queue was serviced but turned out to be empty.  Returns
 * true if the BLUE state was active before but quiescent after this call.
 */
static bool cobalt_queue_empty(struct cobalt_vars *vars,
			       struct cobalt_params *p,
			       ktime_t now)
{
	bool down = false;

	if (vars->p_drop &&
	    ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
		if (vars->p_drop < p->p_dec)
			vars->p_drop = 0;
		else
			vars->p_drop -= p->p_dec;
		vars->blue_timer = now;
		down = !vars->p_drop;
	}
	vars->dropping = false;

	if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
		vars->count--;
		cobalt_invsqrt(vars);
		vars->drop_next = cobalt_control(vars->drop_next,
						 p->interval,
						 vars->rec_inv_sqrt);
	}

	return down;
}
/* Call this with a freshly dequeued packet for possible congestion marking.
 * Returns true as an instruction to drop the packet, false for delivery.
 */
static bool cobalt_should_drop(struct cobalt_vars *vars,
			       struct cobalt_params *p,
			       ktime_t now,
			       struct sk_buff *skb,
			       u32 bulk_flows)
{
	bool next_due, over_target, drop = false;
	ktime_t schedule;
	u64 sojourn;

/* The 'schedule' variable records, in its sign, whether 'now' is before or
 * after 'drop_next'. This allows 'drop_next' to be updated before the next
 * scheduling decision is actually branched, without destroying that
 * information. Similarly, the first 'schedule' value calculated is preserved
 * in the boolean 'next_due'.
 *
 * As for 'drop_next', we take advantage of the fact that 'interval' is both
 * the delay between first exceeding 'target' and the first signalling event,
 * *and* the scaling factor for the signalling frequency. It's therefore very
 * natural to use a single mechanism for both purposes, and eliminates a
 * significant amount of reference Codel's spaghetti code. To help with this,
 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
 * as possible to 1.0 in fixed-point.
 */

	sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
	schedule = ktime_sub(now, vars->drop_next);
	over_target = sojourn > p->target &&
		      sojourn > p->mtu_time * bulk_flows * 2 &&
		      sojourn > p->mtu_time * 4;
	next_due = vars->count && ktime_to_ns(schedule) >= 0;

	vars->ecn_marked = false;

	if (over_target) {
		if (!vars->dropping) {
			vars->dropping = true;
			vars->drop_next = cobalt_control(now,
							 p->interval,
							 vars->rec_inv_sqrt);
		}
		if (!vars->count)
			vars->count = 1;
	} else if (vars->dropping) {
		vars->dropping = false;
	}

	if (next_due && vars->dropping) {
		/* Use ECN mark if possible, otherwise drop */
		drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));

		vars->count++;
		if (!vars->count)
			vars->count--;
		cobalt_invsqrt(vars);
		vars->drop_next = cobalt_control(vars->drop_next,
						 p->interval,
						 vars->rec_inv_sqrt);
		schedule = ktime_sub(now, vars->drop_next);
	} else {
		while (next_due) {
			vars->count--;
			cobalt_invsqrt(vars);
			vars->drop_next = cobalt_control(vars->drop_next,
							 p->interval,
							 vars->rec_inv_sqrt);
			schedule = ktime_sub(now, vars->drop_next);
			next_due = vars->count && ktime_to_ns(schedule) >= 0;
		}
	}

	/* Simple BLUE implementation.  Lack of ECN is deliberate. */
	if (vars->p_drop)
		drop |= (prandom_u32() < vars->p_drop);

	/* Overload the drop_next field as an activity timeout */
	if (!vars->count)
		vars->drop_next = ktime_add_ns(now, p->interval);
	else if (ktime_to_ns(schedule) > 0 && !drop)
		vars->drop_next = now;

	return drop;
}
static void cake_update_flowkeys(struct flow_keys *keys,
				 const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct nf_conntrack_tuple tuple = {};
	bool rev = !skb->_nfct;

	if (skb_protocol(skb, true) != htons(ETH_P_IP))
		return;

	if (!nf_ct_get_tuple_skb(&tuple, skb))
		return;

	keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
	keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;

	if (keys->ports.ports) {
		keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
		keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
	}
#endif
}
/* Cake has several subtle multiple-bit settings. In these cases you
 * would be matching triple isolate mode as well.
 */

static bool cake_dsrc(int flow_mode)
{
	return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
}

static bool cake_ddst(int flow_mode)
{
	return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
		     int flow_mode, u16 flow_override, u16 host_override)
{
	u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
	u16 reduced_hash, srchost_idx, dsthost_idx;
	struct flow_keys keys, host_keys;

	if (unlikely(flow_mode == CAKE_FLOW_NONE))
		return 0;

	/* If both overrides are set we can skip packet dissection entirely */
	if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
	    (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
		goto skip_hash;

	skb_flow_dissect_flow_keys(skb, &keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	if (flow_mode & CAKE_FLOW_NAT_FLAG)
		cake_update_flowkeys(&keys, skb);

	/* flow_hash_from_keys() sorts the addresses by value, so we have
	 * to preserve their order in a separate data structure to treat
	 * src and dst host addresses as independently selectable.
	 */
	host_keys = keys;
	host_keys.ports.ports     = 0;
	host_keys.basic.ip_proto  = 0;
	host_keys.keyid.keyid     = 0;
	host_keys.tags.flow_label = 0;

	switch (host_keys.control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		host_keys.addrs.v4addrs.src = 0;
		dsthost_hash = flow_hash_from_keys(&host_keys);
		host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		host_keys.addrs.v4addrs.dst = 0;
		srchost_hash = flow_hash_from_keys(&host_keys);
		break;

	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		memset(&host_keys.addrs.v6addrs.src, 0,
		       sizeof(host_keys.addrs.v6addrs.src));
		dsthost_hash = flow_hash_from_keys(&host_keys);
		host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		memset(&host_keys.addrs.v6addrs.dst, 0,
		       sizeof(host_keys.addrs.v6addrs.dst));
		srchost_hash = flow_hash_from_keys(&host_keys);
		break;

	default:
		dsthost_hash = 0;
		srchost_hash = 0;
	}

	/* This *must* be after the above switch, since as a
	 * side-effect it sorts the src and dst addresses.
	 */
	if (flow_mode & CAKE_FLOW_FLOWS)
		flow_hash = flow_hash_from_keys(&keys);

skip_hash:
	if (flow_override)
		flow_hash = flow_override - 1;
	if (host_override) {
		dsthost_hash = host_override - 1;
		srchost_hash = host_override - 1;
	}

	if (!(flow_mode & CAKE_FLOW_FLOWS)) {
		if (flow_mode & CAKE_FLOW_SRC_IP)
			flow_hash ^= srchost_hash;

		if (flow_mode & CAKE_FLOW_DST_IP)
			flow_hash ^= dsthost_hash;
	}

	reduced_hash = flow_hash % CAKE_QUEUES;

	/* set-associative hashing */
	/* fast path if no hash collision (direct lookup succeeds) */
	if (likely(q->tags[reduced_hash] == flow_hash &&
		   q->flows[reduced_hash].set)) {
		q->way_directs++;
	} else {
		u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
		u32 outer_hash = reduced_hash - inner_hash;
		bool allocate_src = false;
		bool allocate_dst = false;
		u32 i, k;

		/* check if any active queue in the set is reserved for
		 * this flow.
		 */
		for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
			if (q->tags[outer_hash + k] == flow_hash) {
				if (i)
					q->way_hits++;

				if (!q->flows[outer_hash + k].set) {
					/* need to increment host refcnts */
					allocate_src = cake_dsrc(flow_mode);
					allocate_dst = cake_ddst(flow_mode);
				}

				goto found;
			}
		}

		/* no queue is reserved for this flow, look for an
		 * empty one.
		 */
		for (i = 0; i < CAKE_SET_WAYS;
		     i++, k = (k + 1) % CAKE_SET_WAYS) {
			if (!q->flows[outer_hash + k].set) {
				q->way_misses++;
				allocate_src = cake_dsrc(flow_mode);
				allocate_dst = cake_ddst(flow_mode);
				goto found;
			}
		}

		/* With no empty queues, default to the original
		 * queue, accept the collision, update the host tags.
		 */
		q->way_collisions++;
		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
		}
		allocate_src = cake_dsrc(flow_mode);
		allocate_dst = cake_ddst(flow_mode);
found:
		/* reserve queue for future packets in same flow */
		reduced_hash = outer_hash + k;
		q->tags[reduced_hash] = flow_hash;

		if (allocate_src) {
			srchost_idx = srchost_hash % CAKE_QUEUES;
			inner_hash = srchost_idx % CAKE_SET_WAYS;
			outer_hash = srchost_idx - inner_hash;
			for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
				if (q->hosts[outer_hash + k].srchost_tag ==
				    srchost_hash)
					goto found_src;
			}
			for (i = 0; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
				if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
					break;
			}
			q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
			srchost_idx = outer_hash + k;
			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
				q->hosts[srchost_idx].srchost_bulk_flow_count++;
			q->flows[reduced_hash].srchost = srchost_idx;
		}

		if (allocate_dst) {
			dsthost_idx = dsthost_hash % CAKE_QUEUES;
			inner_hash = dsthost_idx % CAKE_SET_WAYS;
			outer_hash = dsthost_idx - inner_hash;
			for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
				if (q->hosts[outer_hash + k].dsthost_tag ==
				    dsthost_hash)
					goto found_dst;
			}
			for (i = 0; i < CAKE_SET_WAYS;
			     i++, k = (k + 1) % CAKE_SET_WAYS) {
				if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
					break;
			}
			q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
			dsthost_idx = outer_hash + k;
			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
				q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
			q->flows[reduced_hash].dsthost = dsthost_idx;
		}
	}

	return reduced_hash;
}
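/* Illustrative arithmetic for the set-associative lookup above: with
 * CAKE_QUEUES == 1024 and CAKE_SET_WAYS == 8 there are 128 sets of 8
 * queues.  A flow_hash reducing to, say, 517 gives inner_hash == 5 and
 * outer_hash == 512, so buckets 512..519 are probed (starting at 517 and
 * wrapping within the set) before a collision is accepted.
 */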
/* helper functions : might be changed when/if skb use a standard list_head */
/* remove one skb from head of slot queue */

static struct sk_buff *dequeue_head(struct cake_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb_mark_not_on_list(skb);
	}

	return skb;
}

/* add skb to flow queue (tail add) */

static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}
static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
				    struct ipv6hdr *buf)
{
	unsigned int offset = skb_network_offset(skb);
	struct iphdr *iph;

	iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
	if (!iph)
		return NULL;

	if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
		return skb_header_pointer(skb, offset + iph->ihl * 4,
					  sizeof(struct ipv6hdr), buf);

	else if (iph->version == 4)
		return iph;

	else if (iph->version == 6)
		return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
					  buf);

	return NULL;
}
static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
				      void *buf, unsigned int bufsize)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct tcphdr *tcph;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;
	struct tcphdr _tcph;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);

	if (!ipv6h)
		return NULL;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return NULL;

			offset += sizeof(struct ipv6hdr);

		} else if (iph->protocol != IPPROTO_TCP) {
			return NULL;
		}

	} else if (ipv6h->version == 6) {
		if (ipv6h->nexthdr != IPPROTO_TCP)
			return NULL;

		offset += sizeof(struct ipv6hdr);

	} else {
		return NULL;
	}

	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
	if (!tcph || tcph->doff < 5)
		return NULL;

	return skb_header_pointer(skb, offset,
				  min(__tcp_hdrlen(tcph), bufsize), buf);
}
static const void *cake_get_tcpopt(const struct tcphdr *tcph,
				   int code, int *oplen)
{
	/* inspired by tcp_parse_options in tcp_input.c */
	int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
	const u8 *ptr = (const u8 *)(tcph + 1);

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			break;
		if (opcode == TCPOPT_NOP) {
			length--;
			continue;
		}
		if (length < 2)
			break;
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			break;

		if (opcode == code) {
			*oplen = opsize;
			return ptr;
		}

		ptr += opsize - 2;
		length -= opsize;
	}

	return NULL;
}
/* Compare two SACK sequences. A sequence is considered greater if it SACKs
 * more bytes than the other. In the case where both sequences ACK bytes that
 * the other doesn't, A is considered greater. DSACKs in A also make A be
 * considered greater.
 *
 * @return -1, 0 or 1 as normal compare functions
 */
static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
				  const struct tcphdr *tcph_b)
{
	const struct tcp_sack_block_wire *sack_a, *sack_b;
	u32 ack_seq_a = ntohl(tcph_a->ack_seq);
	u32 bytes_a = 0, bytes_b = 0;
	int oplen_a, oplen_b;
	bool first = true;

	sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
	sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);

	/* pointers point to option contents */
	oplen_a -= TCPOLEN_SACK_BASE;
	oplen_b -= TCPOLEN_SACK_BASE;

	if (sack_a && oplen_a >= sizeof(*sack_a) &&
	    (!sack_b || oplen_b < sizeof(*sack_b)))
		return -1;
	else if (sack_b && oplen_b >= sizeof(*sack_b) &&
		 (!sack_a || oplen_a < sizeof(*sack_a)))
		return 1;
	else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
		 (!sack_b || oplen_b < sizeof(*sack_b)))
		return 0;

	while (oplen_a >= sizeof(*sack_a)) {
		const struct tcp_sack_block_wire *sack_tmp = sack_b;
		u32 start_a = get_unaligned_be32(&sack_a->start_seq);
		u32 end_a = get_unaligned_be32(&sack_a->end_seq);
		int oplen_tmp = oplen_b;
		bool found = false;

		/* DSACK; always considered greater to prevent dropping */
		if (before(start_a, ack_seq_a))
			return -1;

		bytes_a += end_a - start_a;

		while (oplen_tmp >= sizeof(*sack_tmp)) {
			u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
			u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);

			/* first time through we count the total size */
			if (first)
				bytes_b += end_b - start_b;

			if (!after(start_b, start_a) && !before(end_b, end_a)) {
				found = true;
				if (!first)
					break;
			}
			oplen_tmp -= sizeof(*sack_tmp);
			sack_tmp++;
		}

		if (!found)
			return -1;

		oplen_a -= sizeof(*sack_a);
		sack_a++;
		first = false;
	}

	/* If we made it this far, all ranges SACKed by A are covered by B, so
	 * either the SACKs are equal, or B SACKs more bytes.
	 */
	return bytes_b > bytes_a ? 1 : 0;
}
static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
				 u32 *tsval, u32 *tsecr)
{
	const u8 *ptr;
	int opsize;

	ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);

	if (ptr && opsize == TCPOLEN_TIMESTAMP) {
		*tsval = get_unaligned_be32(ptr);
		*tsecr = get_unaligned_be32(ptr + 4);
	}
}
static bool cake_tcph_may_drop(const struct tcphdr *tcph,
			       u32 tstamp_new, u32 tsecr_new)
{
	/* inspired by tcp_parse_options in tcp_input.c */
	int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
	const u8 *ptr = (const u8 *)(tcph + 1);
	u32 tstamp, tsecr;

	/* 3 reserved flags must be unset to avoid future breakage
	 * ACK must be set
	 * ECE/CWR are handled separately
	 * All other flags URG/PSH/RST/SYN/FIN must be unset
	 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
	 * 0x00C00000 = CWR/ECE (handled separately)
	 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
	 */
	if (((tcp_flag_word(tcph) &
	      cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
		return false;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			break;
		if (opcode == TCPOPT_NOP) {
			length--;
			continue;
		}
		if (length < 2)
			break;
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			break;

		switch (opcode) {
		case TCPOPT_MD5SIG: /* doesn't influence state */
			break;

		case TCPOPT_SACK: /* stricter checking performed later */
			if (opsize % 8 != 2)
				return false;
			break;

		case TCPOPT_TIMESTAMP:
			/* only drop timestamps lower than new */
			if (opsize != TCPOLEN_TIMESTAMP)
				return false;
			tstamp = get_unaligned_be32(ptr);
			tsecr = get_unaligned_be32(ptr + 4);
			if (after(tstamp, tstamp_new) ||
			    after(tsecr, tsecr_new))
				return false;
			break;

		case TCPOPT_MSS:  /* these should only be set on SYN */
		case TCPOPT_WINDOW:
		case TCPOPT_SACK_PERM:
		case TCPOPT_FASTOPEN:
		case TCPOPT_EXP:
		default: /* don't drop if any unknown options are present */
			return false;
		}

		ptr += opsize - 2;
		length -= opsize;
	}

	return true;
}
static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
				       struct cake_flow *flow)
{
	bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
	struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
	struct sk_buff *skb_check, *skb_prev = NULL;
	const struct ipv6hdr *ipv6h, *ipv6h_check;
	unsigned char _tcph[64], _tcph_check[64];
	const struct tcphdr *tcph, *tcph_check;
	const struct iphdr *iph, *iph_check;
	struct ipv6hdr _iph, _iph_check;
	const struct sk_buff *skb;
	int seglen, num_found = 0;
	u32 tstamp = 0, tsecr = 0;
	__be32 elig_flags = 0;
	int sack_comp;

	/* no other possible ACKs to filter */
	if (flow->head == flow->tail)
		return NULL;

	skb = flow->tail;
	tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
	iph = cake_get_iphdr(skb, &_iph);
	if (!tcph)
		return NULL;

	cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);

	/* the 'triggering' packet need only have the ACK flag set.
	 * also check that SYN is not set, as there won't be any previous ACKs.
	 */
	if ((tcp_flag_word(tcph) &
	     (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
		return NULL;

	/* the 'triggering' ACK is at the tail of the queue, we have already
	 * returned if it is the only packet in the flow. loop through the rest
	 * of the queue looking for pure ACKs with the same 5-tuple as the
	 * triggering one.
	 */
	for (skb_check = flow->head;
	     skb_check && skb_check != skb;
	     skb_prev = skb_check, skb_check = skb_check->next) {
		iph_check = cake_get_iphdr(skb_check, &_iph_check);
		tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
					     sizeof(_tcph_check));

		/* only TCP packets with matching 5-tuple are eligible, and only
		 * drop safe headers
		 */
		if (!tcph_check || iph->version != iph_check->version ||
		    tcph_check->source != tcph->source ||
		    tcph_check->dest != tcph->dest)
			continue;

		if (iph_check->version == 4) {
			if (iph_check->saddr != iph->saddr ||
			    iph_check->daddr != iph->daddr)
				continue;

			seglen = ntohs(iph_check->tot_len) -
				       (4 * iph_check->ihl);
		} else if (iph_check->version == 6) {
			ipv6h = (struct ipv6hdr *)iph;
			ipv6h_check = (struct ipv6hdr *)iph_check;

			if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
			    ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
				continue;

			seglen = ntohs(ipv6h_check->payload_len);
		} else {
			WARN_ON(1);  /* shouldn't happen */
			continue;
		}

		/* If the ECE/CWR flags changed from the previous eligible
		 * packet in the same flow, we should no longer be dropping that
		 * previous packet as this would lose information.
		 */
		if (elig_ack && (tcp_flag_word(tcph_check) &
				 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
			elig_ack = NULL;
			elig_ack_prev = NULL;
			num_found--;
		}

		/* Check TCP options and flags, don't drop ACKs with segment
		 * data, and don't drop ACKs with a higher cumulative ACK
		 * counter than the triggering packet. Check ACK seqno here to
		 * avoid parsing SACK options of packets we are going to exclude
		 * anyway.
		 */
		if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
		    (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
		    after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
			continue;

		/* Check SACK options. The triggering packet must SACK more data
		 * than the ACK under consideration, or SACK the same range but
		 * have a larger cumulative ACK counter. The latter is a
		 * pathological case, but is contained in the following check
		 * anyway, just to be safe.
		 */
		sack_comp = cake_tcph_sack_compare(tcph_check, tcph);

		if (sack_comp < 0 ||
		    (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
		     sack_comp == 0))
			continue;

		/* At this point we have found an eligible pure ACK to drop; if
		 * we are in aggressive mode, we are done. Otherwise, keep
		 * searching unless this is the second eligible ACK we
		 * found.
		 *
		 * Since we want to drop ACK closest to the head of the queue,
		 * save the first eligible ACK we find, even if we need to loop
		 * again.
		 */
		if (!elig_ack) {
			elig_ack = skb_check;
			elig_ack_prev = skb_prev;
			elig_flags = (tcp_flag_word(tcph_check)
				      & (TCP_FLAG_ECE | TCP_FLAG_CWR));
		}

		if (num_found++ > 0)
			goto found;
	}

	/* We made it through the queue without finding two eligible ACKs. If
	 * we found a single eligible ACK we can drop it in aggressive mode if
	 * we can guarantee that this does not interfere with ECN flag
	 * information. We ensure this by dropping it only if the enqueued
	 * packet is consecutive with the eligible ACK, and their flags match.
	 */
	if (elig_ack && aggressive && elig_ack->next == skb &&
	    (elig_flags == (tcp_flag_word(tcph) &
			    (TCP_FLAG_ECE | TCP_FLAG_CWR))))
		goto found;

	return NULL;

found:
	if (elig_ack_prev)
		elig_ack_prev->next = elig_ack->next;
	else
		flow->head = elig_ack->next;

	skb_mark_not_on_list(elig_ack);

	return elig_ack;
}
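/* Net effect (illustrative): in conservative mode an ACK is only dropped
 * once a second eligible ACK is queued behind it, so the newest ACK
 * information always survives; aggressive mode will additionally drop a
 * lone eligible ACK when it is consecutive with the trigger and the
 * ECE/CWR bits match.
 */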
static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
{
	avg -= avg >> shift;
	avg += sample >> shift;
	return avg;
}
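/* For example (illustrative): shift == 2 weights each new sample by 1/4
 * (fast tracking), while shift == 8 weights it by 1/256 (slow smoothing).
 * The delay statistics in cake_dequeue() use 2 in the direction they must
 * track quickly and 8 in the other.
 */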
static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
{
	if (q->rate_flags & CAKE_FLAG_OVERHEAD)
		len -= off;

	if (q->max_netlen < len)
		q->max_netlen = len;
	if (q->min_netlen > len)
		q->min_netlen = len;

	len += q->rate_overhead;

	if (len < q->rate_mpu)
		len = q->rate_mpu; /* minimum packet unit size */

	if (q->atm_mode == CAKE_ATM_ATM) {
		len += 47;
		len /= 48;
		len *= 53;
	} else if (q->atm_mode == CAKE_ATM_PTM) {
		/* Add one byte per 64 bytes or part thereof.
		 * This is conservative and easier to calculate than the
		 * precise value.
		 */
		len += (len + 63) / 64;
	}

	if (q->max_adjlen < len)
		q->max_adjlen = len;
	if (q->min_adjlen > len)
		q->min_adjlen = len;

	return len;
}
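/* Worked example (illustrative): in ATM mode a 1500 byte frame becomes
 * ceil(1500 / 48) == 32 cells of 53 bytes, i.e. 1696 bytes of wire time,
 * which is exactly what the add-47, divide-by-48, multiply-by-53
 * sequence above computes.
 */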
static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int hdr_len, last_len = 0;
	u32 off = skb_network_offset(skb);
	u32 len = qdisc_pkt_len(skb);
	u16 segs = 1;

	q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);

	if (!shinfo->gso_size)
		return cake_calc_overhead(q, len, off);

	/* borrowed from qdisc_pkt_len_init() */
	hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	/* + transport layer */
	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
				       SKB_GSO_TCPV6))) {
		const struct tcphdr *th;
		struct tcphdr _tcphdr;

		th = skb_header_pointer(skb, skb_transport_offset(skb),
					sizeof(_tcphdr), &_tcphdr);
		if (likely(th))
			hdr_len += __tcp_hdrlen(th);
	} else {
		struct udphdr _udphdr;

		if (skb_header_pointer(skb, skb_transport_offset(skb),
				       sizeof(_udphdr), &_udphdr))
			hdr_len += sizeof(struct udphdr);
	}

	if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
		segs = DIV_ROUND_UP(skb->len - hdr_len,
				    shinfo->gso_size);
	else
		segs = shinfo->gso_segs;

	len = shinfo->gso_size + hdr_len;
	last_len = skb->len - shinfo->gso_size * (segs - 1);

	return	(cake_calc_overhead(q, len, off) * (segs - 1) +
		cake_calc_overhead(q, last_len, off));
}
static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
{
	struct cake_heap_entry ii = q->overflow_heap[i];
	struct cake_heap_entry jj = q->overflow_heap[j];

	q->overflow_heap[i] = jj;
	q->overflow_heap[j] = ii;

	q->tins[ii.t].overflow_idx[ii.b] = j;
	q->tins[jj.t].overflow_idx[jj.b] = i;
}

static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
{
	struct cake_heap_entry ii = q->overflow_heap[i];

	return q->tins[ii.t].backlogs[ii.b];
}
static void cake_heapify(struct cake_sched_data *q, u16 i)
{
	static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
	u32 mb = cake_heap_get_backlog(q, i);
	u32 m = i;

	while (m < a) {
		u32 l = m + m + 1;
		u32 r = l + 1;

		if (l < a) {
			u32 lb = cake_heap_get_backlog(q, l);

			if (lb > mb) {
				m  = l;
				mb = lb;
			}
		}

		if (r < a) {
			u32 rb = cake_heap_get_backlog(q, r);

			if (rb > mb) {
				m  = r;
				mb = rb;
			}
		}

		if (m != i) {
			cake_heap_swap(q, i, m);
			i  = m;
		} else {
			break;
		}
	}
}

static void cake_heapify_up(struct cake_sched_data *q, u16 i)
{
	while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
		u16 p = (i - 1) >> 1;
		u32 ib = cake_heap_get_backlog(q, i);
		u32 pb = cake_heap_get_backlog(q, p);

		if (ib > pb) {
			cake_heap_swap(q, i, p);
			i = p;
		} else {
			break;
		}
	}
}
static int cake_advance_shaper(struct cake_sched_data *q,
			       struct cake_tin_data *b,
			       struct sk_buff *skb,
			       ktime_t now, bool drop)
{
	u32 len = get_cobalt_cb(skb)->adjusted_len;

	/* charge packet bandwidth to this tin
	 * and to the global shaper.
	 */
	if (q->rate_ns) {
		u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
		u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
		u64 failsafe_dur = global_dur + (global_dur >> 1);

		if (ktime_before(b->time_next_packet, now))
			b->time_next_packet = ktime_add_ns(b->time_next_packet,
							   tin_dur);

		else if (ktime_before(b->time_next_packet,
				      ktime_add_ns(now, tin_dur)))
			b->time_next_packet = ktime_add_ns(now, tin_dur);

		q->time_next_packet = ktime_add_ns(q->time_next_packet,
						   global_dur);
		if (!drop)
			q->failsafe_next_packet = \
				ktime_add_ns(q->failsafe_next_packet,
					     failsafe_dur);
	}
	return len;
}
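/* Illustrative numbers: with rate_ns == 80 ns/byte and rate_shft == 0
 * (roughly 12.5 Mbyte/s, i.e. ~100 Mbit/s), a 1500 byte packet advances
 * time_next_packet by 120 us and failsafe_next_packet by 180 us
 * (global_dur plus half again).
 */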
static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	ktime_t now = ktime_get();
	u32 idx = 0, tin = 0, len;
	struct cake_heap_entry qq;
	struct cake_tin_data *b;
	struct cake_flow *flow;
	struct sk_buff *skb;

	if (!q->overflow_timeout) {
		int i;
		/* Build fresh max-heap */
		for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
			cake_heapify(q, i);
	}
	q->overflow_timeout = 65535;

	/* select longest queue for pruning */
	qq  = q->overflow_heap[0];
	tin = qq.t;
	idx = qq.b;

	b = &q->tins[tin];
	flow = &b->flows[idx];
	skb = dequeue_head(flow);
	if (unlikely(!skb)) {
		/* heap has gone wrong, rebuild it next time */
		q->overflow_timeout = 0;
		return idx + (tin << 16);
	}

	if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
		b->unresponsive_flow_count++;

	len = qdisc_pkt_len(skb);
	q->buffer_used      -= skb->truesize;
	b->backlogs[idx]    -= len;
	b->tin_backlog      -= len;
	sch->qstats.backlog -= len;
	qdisc_tree_reduce_backlog(sch, 1, len);

	flow->dropped++;
	b->tin_dropped++;
	sch->qstats.drops++;

	if (q->rate_flags & CAKE_FLAG_INGRESS)
		cake_advance_shaper(q, b, skb, now, true);

	__qdisc_drop(skb, to_free);
	sch->q.qlen--;

	cake_heapify(q, 0);

	return idx + (tin << 16);
}
static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
	const int offset = skb_network_offset(skb);
	u16 *buf, buf_;
	u8 dscp;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
		if (unlikely(!buf))
			return 0;

		/* ToS is in the second byte of iphdr */
		dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;

		if (wash && dscp) {
			const int wlen = offset + sizeof(struct iphdr);

			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				return 0;

			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
		}

		return dscp;

	case htons(ETH_P_IPV6):
		buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
		if (unlikely(!buf))
			return 0;

		/* Traffic class is in the first and second bytes of ipv6hdr */
		dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;

		if (wash && dscp) {
			const int wlen = offset + sizeof(struct ipv6hdr);

			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				return 0;

			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
		}

		return dscp;

	case htons(ETH_P_ARP):
		return 0x38;  /* CS7 - Net Control */

	default:
		/* If there is no Diffserv field, treat as best-effort */
		return 0;
	}
}
static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
					     struct sk_buff *skb)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 tin, mark;
	bool wash;
	u8 dscp;

	/* Tin selection: Default to diffserv-based selection, allow overriding
	 * using firewall marks or skb->priority. Call DSCP parsing early if
	 * wash is enabled, otherwise defer to below to skip unneeded parsing.
	 */
	mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
	wash = !!(q->rate_flags & CAKE_FLAG_WASH);
	if (wash)
		dscp = cake_handle_diffserv(skb, wash);

	if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
		tin = 0;

	else if (mark && mark <= q->tin_cnt)
		tin = q->tin_order[mark - 1];

	else if (TC_H_MAJ(skb->priority) == sch->handle &&
		 TC_H_MIN(skb->priority) > 0 &&
		 TC_H_MIN(skb->priority) <= q->tin_cnt)
		tin = q->tin_order[TC_H_MIN(skb->priority) - 1];

	else {
		if (!wash)
			dscp = cake_handle_diffserv(skb, wash);
		tin = q->tin_index[dscp];

		if (unlikely(tin >= q->tin_cnt))
			tin = 0;
	}

	return &q->tins[tin];
}
static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
			 struct sk_buff *skb, int flow_mode, int *qerr)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	u16 flow = 0, host = 0;
	int result;

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		goto hash;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);

	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
			flow = TC_H_MIN(res.classid);
		if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
			host = TC_H_MAJ(res.classid) >> 16;
	}
hash:
	*t = cake_select_tin(sch, skb);
	return cake_hash(*t, skb, flow_mode, flow, host) + 1;
}
static void cake_reconfigure(struct Qdisc *sch);

static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	int len = qdisc_pkt_len(skb);
	int ret;
	struct sk_buff *ack = NULL;
	ktime_t now = ktime_get();
	struct cake_tin_data *b;
	struct cake_flow *flow;
	u32 idx;

	/* choose flow to insert into */
	idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;
	flow = &b->flows[idx];

	/* ensure shaper state isn't stale */
	if (!b->tin_backlog) {
		if (ktime_before(b->time_next_packet, now))
			b->time_next_packet = now;

		if (!sch->q.qlen) {
			if (ktime_before(q->time_next_packet, now)) {
				q->failsafe_next_packet = now;
				q->time_next_packet = now;
			} else if (ktime_after(q->time_next_packet, now) &&
				   ktime_after(q->failsafe_next_packet, now)) {
				u64 next = \
					min(ktime_to_ns(q->time_next_packet),
					    ktime_to_ns(
						   q->failsafe_next_packet));
				sch->qstats.overlimits++;
				qdisc_watchdog_schedule_ns(&q->watchdog, next);
			}
		}
	}

	if (unlikely(len > b->max_skblen))
		b->max_skblen = len;

	if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
		struct sk_buff *segs, *nskb;
		netdev_features_t features = netif_skb_features(skb);
		unsigned int slen = 0, numsegs = 0;

		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

		while (segs) {
			nskb = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			cobalt_set_enqueue_time(segs, now);
			get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
									  segs);
			flow_queue_add(flow, segs);

			sch->q.qlen++;
			numsegs++;
			slen += segs->len;
			q->buffer_used += segs->truesize;
			b->packets++;
			segs = nskb;
		}

		/* stats */
		b->bytes	    += slen;
		b->backlogs[idx]    += slen;
		b->tin_backlog      += slen;
		sch->qstats.backlog += slen;
		q->avg_window_bytes += slen;

		qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
		consume_skb(skb);
	} else {
		/* not splitting */
		cobalt_set_enqueue_time(skb, now);
		get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
		flow_queue_add(flow, skb);

		if (q->ack_filter)
			ack = cake_ack_filter(q, flow);

		if (ack) {
			b->ack_drops++;
			sch->qstats.drops++;
			b->bytes += qdisc_pkt_len(ack);
			len -= qdisc_pkt_len(ack);
			q->buffer_used += skb->truesize - ack->truesize;
			if (q->rate_flags & CAKE_FLAG_INGRESS)
				cake_advance_shaper(q, b, ack, now, true);

			qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
			consume_skb(ack);
		} else {
			sch->q.qlen++;
			q->buffer_used += skb->truesize;
		}

		/* stats */
		b->packets++;
		b->bytes	    += len;
		b->backlogs[idx]    += len;
		b->tin_backlog      += len;
		sch->qstats.backlog += len;
		q->avg_window_bytes += len;
	}

	if (q->overflow_timeout)
		cake_heapify_up(q, b->overflow_idx[idx]);

	/* incoming bandwidth capacity estimate */
	if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
		u64 packet_interval = \
			ktime_to_ns(ktime_sub(now, q->last_packet_time));

		if (packet_interval > NSEC_PER_SEC)
			packet_interval = NSEC_PER_SEC;

		/* filter out short-term bursts, eg. wifi aggregation */
		q->avg_packet_interval = \
			cake_ewma(q->avg_packet_interval,
				  packet_interval,
				  (packet_interval > q->avg_packet_interval ?
					  2 : 8));

		q->last_packet_time = now;

		if (packet_interval > q->avg_packet_interval) {
			u64 window_interval = \
				ktime_to_ns(ktime_sub(now,
						      q->avg_window_begin));
			u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;

			b = div64_u64(b, window_interval);
			q->avg_peak_bandwidth =
				cake_ewma(q->avg_peak_bandwidth, b,
					  b > q->avg_peak_bandwidth ? 2 : 8);
			q->avg_window_bytes = 0;
			q->avg_window_begin = now;

			if (ktime_after(now,
					ktime_add_ms(q->last_reconfig_time,
						     250))) {
				q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
				cake_reconfigure(sch);
			}
		}
	} else {
		q->avg_window_bytes = 0;
		q->last_packet_time = now;
	}

	/* flowchain */
	if (!flow->set || flow->set == CAKE_SET_DECAYING) {
		struct cake_host *srchost = &b->hosts[flow->srchost];
		struct cake_host *dsthost = &b->hosts[flow->dsthost];
		u16 host_load = 1;

		if (!flow->set) {
			list_add_tail(&flow->flowchain, &b->new_flows);
		} else {
			b->decaying_flow_count--;
			list_move_tail(&flow->flowchain, &b->new_flows);
		}
		flow->set = CAKE_SET_SPARSE;
		b->sparse_flow_count++;

		if (cake_dsrc(q->flow_mode))
			host_load = max(host_load, srchost->srchost_bulk_flow_count);

		if (cake_ddst(q->flow_mode))
			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);

		flow->deficit = (b->flow_quantum *
				 quantum_div[host_load]) >> 16;
	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
		struct cake_host *srchost = &b->hosts[flow->srchost];
		struct cake_host *dsthost = &b->hosts[flow->dsthost];

		/* this flow was empty, accounted as a sparse flow, but actually
		 * in the bulk rotation.
		 */
		flow->set = CAKE_SET_BULK;
		b->sparse_flow_count--;
		b->bulk_flow_count++;

		if (cake_dsrc(q->flow_mode))
			srchost->srchost_bulk_flow_count++;

		if (cake_ddst(q->flow_mode))
			dsthost->dsthost_bulk_flow_count++;
	}

	if (q->buffer_used > q->buffer_max_used)
		q->buffer_max_used = q->buffer_used;

	if (q->buffer_used > q->buffer_limit) {
		u32 dropped = 0;

		while (q->buffer_used > q->buffer_limit) {
			dropped++;
			cake_drop(sch, to_free);
		}
		b->drop_overlimit += dropped;
	}
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct cake_tin_data *b = &q->tins[q->cur_tin];
	struct cake_flow *flow = &b->flows[q->cur_flow];
	struct sk_buff *skb = NULL;
	u32 len;

	if (flow->head) {
		skb = dequeue_head(flow);
		len = qdisc_pkt_len(skb);
		b->backlogs[q->cur_flow] -= len;
		b->tin_backlog		 -= len;
		sch->qstats.backlog      -= len;
		q->buffer_used		 -= skb->truesize;
		sch->q.qlen--;

		if (q->overflow_timeout)
			cake_heapify(q, b->overflow_idx[q->cur_flow]);
	}
	return skb;
}

/* Discard leftover packets from a tin no longer in use. */
static void cake_clear_tin(struct Qdisc *sch, u16 tin)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	q->cur_tin = tin;
	for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
		while (!!(skb = cake_dequeue_one(sch)))
			rtnl_kfree_skbs(skb, skb);
}
static struct sk_buff *cake_dequeue(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct cake_tin_data *b = &q->tins[q->cur_tin];
	struct cake_host *srchost, *dsthost;
	ktime_t now = ktime_get();
	struct cake_flow *flow;
	struct list_head *head;
	bool first_flow = true;
	struct sk_buff *skb;
	u16 host_load;
	u64 delay;
	u32 len;

begin:
	if (!sch->q.qlen)
		return NULL;

	/* global hard shaper */
	if (ktime_after(q->time_next_packet, now) &&
	    ktime_after(q->failsafe_next_packet, now)) {
		u64 next = min(ktime_to_ns(q->time_next_packet),
			       ktime_to_ns(q->failsafe_next_packet));

		sch->qstats.overlimits++;
		qdisc_watchdog_schedule_ns(&q->watchdog, next);
		return NULL;
	}

	/* Choose a class to work on. */
	if (!q->rate_ns) {
		/* In unlimited mode, can't rely on shaper timings, just balance
		 * with DRR
		 */
		bool wrapped = false, empty = true;

		while (b->tin_deficit < 0 ||
		       !(b->sparse_flow_count + b->bulk_flow_count)) {
			if (b->tin_deficit <= 0)
				b->tin_deficit += b->tin_quantum_band;
			if (b->sparse_flow_count + b->bulk_flow_count)
				empty = false;

			q->cur_tin++;
			b++;
			if (q->cur_tin >= q->tin_cnt) {
				q->cur_tin = 0;
				b = q->tins;

				if (wrapped) {
					/* It's possible for q->qlen to be
					 * nonzero when we actually have no
					 * packets anywhere.
					 */
					if (empty)
						return NULL;
				} else {
					wrapped = true;
				}
			}
		}
	} else {
		/* In shaped mode, choose:
		 * - Highest-priority tin with queue and meeting schedule, or
		 * - The earliest-scheduled tin with queue.
		 */
		ktime_t best_time = KTIME_MAX;
		int tin, best_tin = 0;

		for (tin = 0; tin < q->tin_cnt; tin++) {
			b = q->tins + tin;
			if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
				ktime_t time_to_pkt = \
					ktime_sub(b->time_next_packet, now);

				if (ktime_to_ns(time_to_pkt) <= 0 ||
				    ktime_compare(time_to_pkt,
						  best_time) <= 0) {
					best_time = time_to_pkt;
					best_tin = tin;
				}
			}
		}

		q->cur_tin = best_tin;
		b = q->tins + best_tin;

		/* No point in going further if no packets to deliver. */
		if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
			return NULL;
	}

retry:
	/* service this class */
	head = &b->decaying_flows;
	if (!first_flow || list_empty(head)) {
		head = &b->new_flows;
		if (list_empty(head)) {
			head = &b->old_flows;
			if (unlikely(list_empty(head))) {
				head = &b->decaying_flows;
				if (unlikely(list_empty(head)))
					goto begin;
			}
		}
	}
	flow = list_first_entry(head, struct cake_flow, flowchain);
	q->cur_flow = flow - b->flows;
	first_flow = false;

	/* triple isolation (modified DRR++) */
	srchost = &b->hosts[flow->srchost];
	dsthost = &b->hosts[flow->dsthost];
	host_load = 1;

	/* flow isolation (DRR++) */
	if (flow->deficit <= 0) {
		/* Keep all flows with deficits out of the sparse and decaying
		 * rotations.  No non-empty flow can go into the decaying
		 * rotation, so they can't get deficits
		 */
		if (flow->set == CAKE_SET_SPARSE) {
			if (flow->head) {
				b->sparse_flow_count--;
				b->bulk_flow_count++;

				if (cake_dsrc(q->flow_mode))
					srchost->srchost_bulk_flow_count++;

				if (cake_ddst(q->flow_mode))
					dsthost->dsthost_bulk_flow_count++;

				flow->set = CAKE_SET_BULK;
			} else {
				/* we've moved it to the bulk rotation for
				 * correct deficit accounting but we still want
				 * to count it as a sparse flow, not a bulk one.
				 */
				flow->set = CAKE_SET_SPARSE_WAIT;
			}
		}

		if (cake_dsrc(q->flow_mode))
			host_load = max(host_load, srchost->srchost_bulk_flow_count);

		if (cake_ddst(q->flow_mode))
			host_load = max(host_load, dsthost->dsthost_bulk_flow_count);

		WARN_ON(host_load > CAKE_QUEUES);

		/* The shifted prandom_u32() is a way to apply dithering to
		 * avoid accumulating roundoff errors
		 */
		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
				  (prandom_u32() >> 16)) >> 16;
		list_move_tail(&flow->flowchain, &b->old_flows);

		goto retry;
	}

	/* Retrieve a packet via the AQM */
	while (1) {
		skb = cake_dequeue_one(sch);
		if (!skb) {
			/* this queue was actually empty */
			if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
				b->unresponsive_flow_count--;

			if (flow->cvars.p_drop || flow->cvars.count ||
			    ktime_before(now, flow->cvars.drop_next)) {
				/* keep in the flowchain until the state has
				 * decayed to rest
				 */
				list_move_tail(&flow->flowchain,
					       &b->decaying_flows);
				if (flow->set == CAKE_SET_BULK) {
					b->bulk_flow_count--;

					if (cake_dsrc(q->flow_mode))
						srchost->srchost_bulk_flow_count--;

					if (cake_ddst(q->flow_mode))
						dsthost->dsthost_bulk_flow_count--;

					b->decaying_flow_count++;
				} else if (flow->set == CAKE_SET_SPARSE ||
					   flow->set == CAKE_SET_SPARSE_WAIT) {
					b->sparse_flow_count--;
					b->decaying_flow_count++;
				}
				flow->set = CAKE_SET_DECAYING;
			} else {
				/* remove empty queue from the flowchain */
				list_del_init(&flow->flowchain);
				if (flow->set == CAKE_SET_SPARSE ||
				    flow->set == CAKE_SET_SPARSE_WAIT)
					b->sparse_flow_count--;
				else if (flow->set == CAKE_SET_BULK) {
					b->bulk_flow_count--;

					if (cake_dsrc(q->flow_mode))
						srchost->srchost_bulk_flow_count--;

					if (cake_ddst(q->flow_mode))
						dsthost->dsthost_bulk_flow_count--;

				} else
					b->decaying_flow_count--;

				flow->set = CAKE_SET_NONE;
			}
			goto begin;
		}

		/* Last packet in queue may be marked, shouldn't be dropped */
		if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
					(b->bulk_flow_count *
					 !!(q->rate_flags &
					    CAKE_FLAG_INGRESS))) ||
		    !flow->head)
			break;

		/* drop this packet, get another one */
		if (q->rate_flags & CAKE_FLAG_INGRESS) {
			len = cake_advance_shaper(q, b, skb,
						  now, true);
			flow->deficit -= len;
			b->tin_deficit -= len;
		}
		flow->dropped++;
		b->tin_dropped++;
		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
		qdisc_qstats_drop(sch);
		kfree_skb(skb);
		if (q->rate_flags & CAKE_FLAG_INGRESS)
			goto retry;
	}

	b->tin_ecn_mark += !!flow->cvars.ecn_marked;
	qdisc_bstats_update(sch, skb);

	/* collect delay stats */
	delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
	b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
	b->peak_delay = cake_ewma(b->peak_delay, delay,
				  delay > b->peak_delay ? 2 : 8);
	b->base_delay = cake_ewma(b->base_delay, delay,
				  delay < b->base_delay ? 2 : 8);

	len = cake_advance_shaper(q, b, skb, now, false);
	flow->deficit -= len;
	b->tin_deficit -= len;

	if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
		u64 next = min(ktime_to_ns(q->time_next_packet),
			       ktime_to_ns(q->failsafe_next_packet));

		qdisc_watchdog_schedule_ns(&q->watchdog, next);
	} else if (!sch->q.qlen) {
		int i;

		for (i = 0; i < q->tin_cnt; i++) {
			if (q->tins[i].decaying_flow_count) {
				ktime_t next = \
					ktime_add_ns(now,
						     q->tins[i].cparams.target);

				qdisc_watchdog_schedule_ns(&q->watchdog,
							   ktime_to_ns(next));
				break;
			}
		}
	}

	if (q->overflow_timeout)
		q->overflow_timeout--;

	return skb;
}
static void cake_reset(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 c;

	if (!q->tins)
		return;

	for (c = 0; c < CAKE_MAX_TINS; c++)
		cake_clear_tin(sch, c);
}
static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
	[TCA_CAKE_BASE_RATE64]   = { .type = NLA_U64 },
	[TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
	[TCA_CAKE_ATM]		 = { .type = NLA_U32 },
	[TCA_CAKE_FLOW_MODE]     = { .type = NLA_U32 },
	[TCA_CAKE_OVERHEAD]      = { .type = NLA_S32 },
	[TCA_CAKE_RTT]		 = { .type = NLA_U32 },
	[TCA_CAKE_TARGET]	 = { .type = NLA_U32 },
	[TCA_CAKE_AUTORATE]      = { .type = NLA_U32 },
	[TCA_CAKE_MEMORY]	 = { .type = NLA_U32 },
	[TCA_CAKE_NAT]		 = { .type = NLA_U32 },
	[TCA_CAKE_RAW]		 = { .type = NLA_U32 },
	[TCA_CAKE_WASH]		 = { .type = NLA_U32 },
	[TCA_CAKE_MPU]		 = { .type = NLA_U32 },
	[TCA_CAKE_INGRESS]	 = { .type = NLA_U32 },
	[TCA_CAKE_ACK_FILTER]	 = { .type = NLA_U32 },
	[TCA_CAKE_SPLIT_GSO]	 = { .type = NLA_U32 },
	[TCA_CAKE_FWMARK]	 = { .type = NLA_U32 },
};
static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
			  u64 target_ns, u64 rtt_est_ns)
{
	/* convert byte-rate into time-per-byte
	 * so it will always unwedge in reasonable time.
	 */
	static const u64 MIN_RATE = 64;
	u32 byte_target = mtu;
	u64 byte_target_ns;
	u8  rate_shft = 0;
	u64 rate_ns = 0;

	b->flow_quantum = 1514;
	if (rate) {
		b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
		rate_shft = 34;
		rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
		rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
		while (!!(rate_ns >> 34)) {
			rate_ns >>= 1;
			rate_shft--;
		}
	} /* else unlimited, ie. zero delay */

	b->tin_rate_bps  = rate;
	b->tin_rate_ns   = rate_ns;
	b->tin_rate_shft = rate_shft;

	byte_target_ns = (byte_target * rate_ns) >> rate_shft;

	b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
	b->cparams.interval = max(rtt_est_ns +
				     b->cparams.target - target_ns,
				     b->cparams.target * 2);
	b->cparams.mtu_time = byte_target_ns;
	b->cparams.p_inc = 1 << 24; /* 1/256 */
	b->cparams.p_dec = 1 << 20; /* 1/4096 */
}
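/* Illustrative numbers (assuming rate is in bytes per second here): at
 * 12.5 Mbyte/s (~100 Mbit/s), rate >> 12 is ~3050 so flow_quantum clamps
 * down to 1514; at 125 kbyte/s (~1 Mbit/s) it is ~30 and clamps up to
 * 300.  At the higher rate a 1514 byte MTU serialises in ~121 us, so the
 * Codel target stays at the configured default (5 ms) rather than the
 * 1.5 * MTU-time floor.
 */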
static int cake_config_besteffort(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct cake_tin_data *b = &q->tins[0];
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;

	q->tin_cnt = 1;

	q->tin_index = besteffort;
	q->tin_order = normal_order;

	cake_set_rate(b, rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	b->tin_quantum_band = 65535;
	b->tin_quantum_prio = 65535;

	return 0;
}
static int cake_config_precedence(struct Qdisc *sch)
{
	/* convert high-level (user visible) parameters into internal format */
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum1 = 256;
	u32 quantum2 = 256;
	u32 i;

	q->tin_cnt = 8;
	q->tin_index = precedence;
	q->tin_order = normal_order;

	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[i];

		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));

		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
		b->tin_quantum_band = max_t(u16, 1U, quantum2);

		/* calculate next class's parameters */
		rate  *= 7;
		rate >>= 3;

		quantum1  *= 3;
		quantum1 >>= 1;

		quantum2  *= 7;
		quantum2 >>= 3;
	}

	return 0;
}
/* List of known Diffserv codepoints:
 *
 * Least Effort (CS1)
 * Best Effort (CS0)
 * Max Reliability & LLT "Lo" (TOS1)
 * Max Throughput (TOS2)
 * Min Delay (TOS4)
 * LLT "La" (TOS5)
 * Assured Forwarding 1 (AF1x) - x3
 * Assured Forwarding 2 (AF2x) - x3
 * Assured Forwarding 3 (AF3x) - x3
 * Assured Forwarding 4 (AF4x) - x3
 * Precedence Class 2 (CS2)
 * Precedence Class 3 (CS3)
 * Precedence Class 4 (CS4)
 * Precedence Class 5 (CS5)
 * Precedence Class 6 (CS6)
 * Precedence Class 7 (CS7)
 * Voice Admit (VA)
 * Expedited Forwarding (EF)
 *
 * Total 25 codepoints.
 */

/* List of traffic classes in RFC 4594:
 *		(roughly descending order of contended priority)
 *		(roughly ascending order of uncontended throughput)
 *
 * Network Control (CS6,CS7)      - routing traffic
 * Telephony (EF,VA)              - aka. VoIP streams
 * Signalling (CS5)               - VoIP setup
 * Multimedia Conferencing (AF4x) - aka. video calls
 * Realtime Interactive (CS4)     - eg. games
 * Multimedia Streaming (AF3x)    - eg. YouTube, NetFlix, Twitch
 * Broadcast Video (CS3)
 * Low Latency Data (AF2x,TOS4)      - eg. database
 * Ops, Admin, Management (CS2,TOS1) - eg. ssh
 * Standard Service (CS0 & unrecognised codepoints)
 * High Throughput Data (AF1x,TOS2)  - eg. web traffic
 * Low Priority Data (CS1)           - eg. BitTorrent
 *
 * Total 12 traffic classes.
 */
static int cake_config_diffserv8(struct Qdisc *sch)
{
/* Pruned list of traffic classes for typical applications:
 *
 *	Network Control          (CS6, CS7)
 *	Minimum Latency          (EF, VA, CS5, CS4)
 *	Interactive Shell        (CS2, TOS1)
 *	Low Latency Transactions (AF2x, TOS4)
 *	Video Streaming          (AF4x, AF3x, CS3)
 *	Bog Standard             (CS0 etc.)
 *	High Throughput          (AF1x, TOS2)
 *	Background Traffic       (CS1)
 *
 *	Total 8 traffic classes.
 */

	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum1 = 256;
	u32 quantum2 = 256;
	u32 i;

	q->tin_cnt = 8;

	/* codepoint to class mapping */
	q->tin_index = diffserv8;
	q->tin_order = normal_order;

	/* class characteristics */
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[i];

		cake_set_rate(b, rate, mtu, us_to_ns(q->target),
			      us_to_ns(q->interval));

		b->tin_quantum_prio = max_t(u16, 1U, quantum1);
		b->tin_quantum_band = max_t(u16, 1U, quantum2);

		/* calculate next class's parameters */
		rate  *= 7;
		rate >>= 3;

		quantum1  *= 3;
		quantum1 >>= 1;

		quantum2  *= 7;
		quantum2 >>= 3;
	}

	return 0;
}
static int cake_config_diffserv4(struct Qdisc *sch)
{
	/* Further pruned list of traffic classes for four-class system:
	 *
	 * Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
	 * Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
	 * Best Effort        (CS0, AF1x, TOS2, and those not specified)
	 * Background Traffic (CS1)
	 *
	 * Total 4 traffic classes.
	 */
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum = 1024;

	q->tin_cnt = 4;

	/* codepoint to class mapping */
	q->tin_index = diffserv4;
	q->tin_order = bulk_order;

	/* class characteristics */
	cake_set_rate(&q->tins[0], rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[1], rate >> 4, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[2], rate >> 1, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[3], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

	/* priority weights */
	q->tins[0].tin_quantum_prio = quantum;
	q->tins[1].tin_quantum_prio = quantum >> 4;
	q->tins[2].tin_quantum_prio = quantum << 2;
	q->tins[3].tin_quantum_prio = quantum << 4;

	/* bandwidth-sharing weights */
	q->tins[0].tin_quantum_band = quantum;
	q->tins[1].tin_quantum_band = quantum >> 4;
	q->tins[2].tin_quantum_band = quantum >> 1;
	q->tins[3].tin_quantum_band = quantum >> 2;

	return 0;
}
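/* Concretely (illustrative numbers) for cake_config_diffserv4() above: with
 * quantum = 1024 and a 100 Mbit/s base rate, tins 0..3 get rate thresholds
 * of 100, 6.25, 50 and 25 Mbit/s, priority quanta of 1024, 64, 4096 and
 * 16384, and bandwidth-sharing quanta of 1024, 64, 512 and 256.  The large
 * priority quanta of the upper tins apply only while those tins stay below
 * their thresholds; above them the band quanta take over, as described in
 * the header comment.
 */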
static int cake_config_diffserv3(struct Qdisc *sch)
{
	/* Simplified Diffserv structure with 3 tins.
	 * Low Priority      (CS1)
	 * Best Effort
	 * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
	 */
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 mtu = psched_mtu(qdisc_dev(sch));
	u64 rate = q->rate_bps;
	u32 quantum = 1024;

	q->tin_cnt = 3;

	/* codepoint to class mapping */
	q->tin_index = diffserv3;
	q->tin_order = bulk_order;

	/* class characteristics */
	cake_set_rate(&q->tins[0], rate, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[1], rate >> 4, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));
	cake_set_rate(&q->tins[2], rate >> 2, mtu,
		      us_to_ns(q->target), us_to_ns(q->interval));

	/* priority weights */
	q->tins[0].tin_quantum_prio = quantum;
	q->tins[1].tin_quantum_prio = quantum >> 4;
	q->tins[2].tin_quantum_prio = quantum << 4;

	/* bandwidth-sharing weights */
	q->tins[0].tin_quantum_band = quantum;
	q->tins[1].tin_quantum_band = quantum >> 4;
	q->tins[2].tin_quantum_band = quantum >> 2;

	return 0;
}
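/* In cake_config_diffserv3() above, tin 0 is Best Effort at the full rate,
 * tin 1 is Low Priority with a 1/16 threshold, and tin 2 is Latency
 * Sensitive with a 1/4 threshold; bulk_order (defined earlier in this file)
 * only reorders how the tins are presented in dumps and class walks, as in
 * cake_dump_stats() and cake_walk() below.
 */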
static void cake_reconfigure(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	int c, ft;

	switch (q->tin_mode) {
	case CAKE_DIFFSERV_BESTEFFORT:
		ft = cake_config_besteffort(sch);
		break;

	case CAKE_DIFFSERV_PRECEDENCE:
		ft = cake_config_precedence(sch);
		break;

	case CAKE_DIFFSERV_DIFFSERV8:
		ft = cake_config_diffserv8(sch);
		break;

	case CAKE_DIFFSERV_DIFFSERV4:
		ft = cake_config_diffserv4(sch);
		break;

	case CAKE_DIFFSERV_DIFFSERV3:
	default:
		ft = cake_config_diffserv3(sch);
		break;
	}

	for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
		cake_clear_tin(sch, c);
		q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
	}

	q->rate_ns   = q->tins[ft].tin_rate_ns;
	q->rate_shft = q->tins[ft].tin_rate_shft;

	if (q->buffer_config_limit) {
		q->buffer_limit = q->buffer_config_limit;
	} else if (q->rate_bps) {
		u64 t = q->rate_bps * q->interval;

		do_div(t, USEC_PER_SEC / 4);
		q->buffer_limit = max_t(u32, t, 4U << 20);
	} else {
		q->buffer_limit = ~0;
	}

	sch->flags &= ~TCQ_F_CAN_BYPASS;

	q->buffer_limit = min(q->buffer_limit,
			      max(sch->limit * psched_mtu(qdisc_dev(sch)),
				  q->buffer_config_limit));
}
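/* Worked example of the automatic buffer sizing in cake_reconfigure() above:
 * at rate_bps = 10^8 (100 Mbit/s) and interval = 100000 us,
 * t = 10^8 * 10^5 / 250000 = 40000000, comfortably above the 4 MiB floor
 * (4U << 20 = 4194304); the result is then clamped against sch->limit
 * packets of MTU size.
 */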
static int cake_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CAKE_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
		q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
		q->flow_mode |= CAKE_FLOW_NAT_FLAG *
			!!nla_get_u32(tb[TCA_CAKE_NAT]);
#else
		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
				    "No conntrack support in kernel");
		return -EOPNOTSUPP;
#endif
	}

	if (tb[TCA_CAKE_BASE_RATE64])
		q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);

	if (tb[TCA_CAKE_DIFFSERV_MODE])
		q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);

	if (tb[TCA_CAKE_WASH]) {
		if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
			q->rate_flags |= CAKE_FLAG_WASH;
		else
			q->rate_flags &= ~CAKE_FLAG_WASH;
	}

	if (tb[TCA_CAKE_FLOW_MODE])
		q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
				(nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
					CAKE_FLOW_MASK));

	if (tb[TCA_CAKE_ATM])
		q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);

	if (tb[TCA_CAKE_OVERHEAD]) {
		q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
		q->rate_flags |= CAKE_FLAG_OVERHEAD;
		q->max_netlen = 0;
		q->max_adjlen = 0;
		q->min_netlen = ~0;
		q->min_adjlen = ~0;
	}

	if (tb[TCA_CAKE_RAW]) {
		q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
		q->max_netlen = 0;
		q->max_adjlen = 0;
		q->min_netlen = ~0;
		q->min_adjlen = ~0;
	}

	if (tb[TCA_CAKE_MPU])
		q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);

	if (tb[TCA_CAKE_RTT]) {
		q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
		if (!q->interval)
			q->interval = 1;
	}

	if (tb[TCA_CAKE_TARGET]) {
		q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
		if (!q->target)
			q->target = 1;
	}

	if (tb[TCA_CAKE_AUTORATE]) {
		if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
			q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
		else
			q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
	}

	if (tb[TCA_CAKE_INGRESS]) {
		if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
			q->rate_flags |= CAKE_FLAG_INGRESS;
		else
			q->rate_flags &= ~CAKE_FLAG_INGRESS;
	}

	if (tb[TCA_CAKE_ACK_FILTER])
		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);

	if (tb[TCA_CAKE_MEMORY])
		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);

	if (tb[TCA_CAKE_SPLIT_GSO]) {
		if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
			q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
		else
			q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
	}

	if (tb[TCA_CAKE_FWMARK]) {
		q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
		q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
	}
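	/* Example (illustrative): fwmark_mask 0xff00 gives fwmark_shft = 8,
	 * so a packet with skb->mark 0x0200 yields tin selector
	 * (0x0200 & 0xff00) >> 8 = 2, i.e. the second tin in tin_order.
	 */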
	if (q->tins) {
		sch_tree_lock(sch);
		cake_reconfigure(sch);
		sch_tree_unlock(sch);
	}

	return 0;
}
static void cake_destroy(struct Qdisc *sch)
{
	struct cake_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tcf_block_put(q->block);
	kvfree(q->tins);
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	int i, j, err;

	sch->limit = 10240;
	q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
	q->flow_mode = CAKE_FLOW_TRIPLE;

	q->rate_bps = 0; /* unlimited by default */

	q->interval = 100000; /* 100ms default */
	q->target = 5000; /* 5ms: codel RFC argues
			   * for 5 to 10% of interval
			   */
	q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
	q->cur_tin = 0;
	q->cur_flow = 0;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt) {
		err = cake_change(sch, opt, extack);
		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;
	quantum_div[0] = ~0;
	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;
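	/* quantum_div[] is a precomputed 65535/i table (e.g. quantum_div[3]
	 * = 21845), used elsewhere in this file to approximate division by
	 * a per-host flow count with a multiply and shift instead of a
	 * runtime division.
	 */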
	q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
			   GFP_KERNEL);
	if (!q->tins)
		return -ENOMEM;

	for (i = 0; i < CAKE_MAX_TINS; i++) {
		struct cake_tin_data *b = q->tins + i;

		INIT_LIST_HEAD(&b->new_flows);
		INIT_LIST_HEAD(&b->old_flows);
		INIT_LIST_HEAD(&b->decaying_flows);
		b->sparse_flow_count = 0;
		b->bulk_flow_count = 0;
		b->decaying_flow_count = 0;

		for (j = 0; j < CAKE_QUEUES; j++) {
			struct cake_flow *flow = b->flows + j;
			u32 k = j * CAKE_MAX_TINS + i;

			INIT_LIST_HEAD(&flow->flowchain);
			cobalt_vars_init(&flow->cvars);

			q->overflow_heap[k].t = i;
			q->overflow_heap[k].b = j;
			b->overflow_idx[j] = k;
		}
	}

	cake_reconfigure(sch);
	q->avg_peak_bandwidth = q->rate_bps;
	q->min_netlen = ~0;
	q->min_adjlen = ~0;
	return 0;
}
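/* Typical configuration from user space (illustrative):
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv3
 *
 * exercises the defaults set up above; adding e.g. "nat", "wash" or
 * "overhead 18" feeds the corresponding TCA_CAKE_* attributes through
 * cake_change().
 */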
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
			      TCA_CAKE_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
			q->flow_mode & CAKE_FLOW_MASK))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
			!!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_INGRESS,
			!!(q->rate_flags & CAKE_FLAG_INGRESS)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_NAT,
			!!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_WASH,
			!!(q->rate_flags & CAKE_FLAG_WASH)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
		goto nla_put_failure;

	if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
		if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
			goto nla_put_failure;
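	/* TCA_CAKE_RAW is emitted only when no explicit overhead is
	 * configured, so user space can tell "overhead 0" apart from the
	 * default raw mode even though TCA_CAKE_OVERHEAD above reads 0 in
	 * both cases.
	 */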
	if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
			!!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}
static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tstats, *ts;
	int i;

	if (!stats)
		return -1;

#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_U64(attr, data) do {				       \
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
				      data, TCA_CAKE_STATS_PAD))       \
			goto nla_put_failure;			       \
	} while (0)

	PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
	PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
	PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
	PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
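	/* avg_netoff carries 16 fractional bits; adding 0x8000 before the
	 * 16-bit shift rounds to the nearest integer instead of truncating.
	 */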
	PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
	PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
	PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
	PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);

#undef PUT_STAT_U32
#undef PUT_STAT_U64
	tstats = nla_nest_start_noflag(d->skb, TCA_CAKE_STATS_TIN_STATS);
	if (!tstats)
		goto nla_put_failure;

#define PUT_TSTAT_U32(attr, data) do {					\
		if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
			goto nla_put_failure;				\
	} while (0)
#define PUT_TSTAT_U64(attr, data) do {					\
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
				      data, TCA_CAKE_TIN_STATS_PAD))	\
			goto nla_put_failure;				\
	} while (0)

	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		ts = nla_nest_start_noflag(d->skb, i + 1);
		if (!ts)
			goto nla_put_failure;
		PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
		PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
		PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);

		PUT_TSTAT_U32(TARGET_US,
			      ktime_to_us(ns_to_ktime(b->cparams.target)));
		PUT_TSTAT_U32(INTERVAL_US,
			      ktime_to_us(ns_to_ktime(b->cparams.interval)));

		PUT_TSTAT_U32(SENT_PACKETS, b->packets);
		PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
		PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
		PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);

		PUT_TSTAT_U32(PEAK_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->peak_delay)));
		PUT_TSTAT_U32(AVG_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->avge_delay)));
		PUT_TSTAT_U32(BASE_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->base_delay)));

		PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
		PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
		PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);

		PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
					    b->decaying_flow_count);
		PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
		PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
		PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);

		PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
		nla_nest_end(d->skb, ts);
	}
#undef PUT_TSTAT_U32
#undef PUT_TSTAT_U64

	nla_nest_end(d->skb, tstats);
	return nla_nest_end(d->skb, stats);

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long cake_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
			       u32 classid)
{
	return 0;
}

static void cake_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}
static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
			   struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	const struct cake_flow *flow = NULL;
	struct gnet_stats_queue qs = { 0 };
	struct nlattr *stats;
	u32 idx = cl - 1;

	if (idx < CAKE_QUEUES * q->tin_cnt) {
		const struct cake_tin_data *b =
			&q->tins[q->tin_order[idx / CAKE_QUEUES]];
		const struct sk_buff *skb;

		flow = &b->flows[idx % CAKE_QUEUES];

		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (flow) {
		ktime_t now = ktime_get();

		stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
		if (!stats)
			return -1;

#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_S32(attr, data) do {				       \
		if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)

		PUT_STAT_S32(DEFICIT, flow->deficit);
		PUT_STAT_U32(DROPPING, flow->cvars.dropping);
		PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
		PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
		if (flow->cvars.p_drop) {
			PUT_STAT_S32(BLUE_TIMER_US,
				     ktime_to_us(
					     ktime_sub(now,
						       flow->cvars.blue_timer)));
		}
		if (flow->cvars.dropping) {
			PUT_STAT_S32(DROP_NEXT_US,
				     ktime_to_us(
					     ktime_sub(now,
						       flow->cvars.drop_next)));
		}

		if (nla_nest_end(d->skb, stats) < 0)
			return -1;
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	unsigned int i, j;

	if (arg->stop)
		return;

	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		for (j = 0; j < CAKE_QUEUES; j++) {
			if (list_empty(&b->flows[j].flowchain) ||
			    arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
				arg->stop = 1;
				break;
			}
			arg->count++;
		}
	}
}
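/* Class IDs handed to arg->fn are 1-based: tin i, queue j maps to
 * i * CAKE_QUEUES + j + 1, which is undone by the "idx = cl - 1" decoding
 * in cake_dump_class_stats() above.
 */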
static const struct Qdisc_class_ops cake_class_ops = {
	.leaf		=	cake_leaf,
	.find		=	cake_find,
	.tcf_block	=	cake_tcf_block,
	.bind_tcf	=	cake_bind,
	.unbind_tcf	=	cake_unbind,
	.dump		=	cake_dump_class,
	.dump_stats	=	cake_dump_class_stats,
	.walk		=	cake_walk,
};
static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
	.cl_ops		=	&cake_class_ops,
	.id		=	"cake",
	.priv_size	=	sizeof(struct cake_sched_data),
	.enqueue	=	cake_enqueue,
	.dequeue	=	cake_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cake_init,
	.reset		=	cake_reset,
	.destroy	=	cake_destroy,
	.change		=	cake_change,
	.dump		=	cake_dump,
	.dump_stats	=	cake_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cake_module_init(void)
{
	return register_qdisc(&cake_qdisc_ops);
}

static void __exit cake_module_exit(void)
{
	unregister_qdisc(&cake_qdisc_ops);
}

module_init(cake_module_init)
module_exit(cake_module_exit)
MODULE_AUTHOR("Jonathan Morton");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("The CAKE shaper.");