1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 /* COMMON Applications Kept Enhanced (CAKE) discipline
5 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
6 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
7 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
8 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
9 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
10 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
12 * The CAKE Principles:
13 * (or, how to have your cake and eat it too)
15 * This is a combination of several shaping, AQM and FQ techniques into one
16 * easy-to-use package:
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
19 * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
20 * eliminating the need for any sort of burst parameter (eg. token bucket
21 * depth). Burst support is limited to that necessary to overcome scheduling
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
25 * up to a specified fraction of bandwidth. Above that bandwidth threshold,
26 * the priority is reduced to avoid starving other tins.
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
29 * flows from each other. This prevents a burst on one flow from increasing
30 * the delay to another. Flows are distributed to queues using a
31 * set-associative hash function.
33 * - Each queue is actively managed by Cobalt, which is a combination of the
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
35 * congestion early via ECN (if available) and/or packet drops, to keep
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
37 * setting, as is necessary at low bandwidths.
39 * The configuration parameters are kept deliberately simple for ease of use.
40 * Everything has sane defaults. Complete generality of configuration is *not*
43 * The priority queue operates according to a weighted DRR scheme, combined with
44 * a bandwidth tracker which reuses the shaper logic to detect which side of the
45 * bandwidth sharing threshold the tin is operating on. This determines whether a
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
47 * that tin in the current pass.
49 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
50 * granted us permission to leverage.
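/* Illustrative sketch (not part of the qdisc): the shaper below keeps the
 * configured rate as nanoseconds-per-byte in fixed point, so advancing the
 * transmit schedule for a packet is a multiply and a shift rather than a
 * divide, roughly:
 *
 *	rate_ns = div64_u64((u64)NSEC_PER_SEC << rate_shft, rate_in_Bps);
 *	tx_time = (len * rate_ns) >> rate_shft;
 *	time_next_packet = ktime_add_ns(time_next_packet, tx_time);
 *
 * e.g. at 12.5 MB/s (100 Mbit/s) a 1514-byte packet advances the schedule by
 * about 121 us. 'rate_in_Bps' is a hypothetical name used only in this sketch.
 */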
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/jiffies.h>
57 #include <linux/string.h>
59 #include <linux/errno.h>
60 #include <linux/init.h>
61 #include <linux/skbuff.h>
62 #include <linux/jhash.h>
63 #include <linux/slab.h>
64 #include <linux/vmalloc.h>
65 #include <linux/reciprocal_div.h>
66 #include <net/netlink.h>
67 #include <linux/if_vlan.h>
68 #include <net/pkt_sched.h>
69 #include <net/pkt_cls.h>
71 #include <net/flow_dissector.h>
73 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
74 #include <net/netfilter/nf_conntrack_core.h>
77 #define CAKE_SET_WAYS (8)
78 #define CAKE_MAX_TINS (8)
79 #define CAKE_QUEUES (1024)
80 #define CAKE_FLOW_MASK 63
81 #define CAKE_FLOW_NAT_FLAG 64
83 /* struct cobalt_params - contains codel and blue parameters
84 * @interval: codel initial drop rate
85 * @target: maximum persistent sojourn time & blue update rate
86 * @mtu_time: serialisation delay of maximum-size packet
87 * @p_inc: increment of blue drop probability (0.32 fxp)
88 * @p_dec: decrement of blue drop probability (0.32 fxp)
90 struct cobalt_params {
98 /* struct cobalt_vars - contains codel and blue variables
99 * @count: codel dropping frequency
100 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
101 * @drop_next: time to drop next packet, or when we dropped last
102 * @blue_timer: Blue time to next drop
103 * @p_drop: BLUE drop probability (0.32 fxp)
104 * @dropping: set if in dropping state
105 * @ecn_marked: set if marked
120 CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
126 /* this stuff is all needed per-flow at dequeue time */
127 struct sk_buff *head;
128 struct sk_buff *tail;
129 struct list_head flowchain;
132 struct cobalt_vars cvars;
133 u16 srchost; /* index into cake_host table */
136 }; /* please try to keep this structure <= 64 bytes */
145 struct cake_heap_entry {
149 struct cake_tin_data {
150 struct cake_flow flows[CAKE_QUEUES];
151 u32 backlogs[CAKE_QUEUES];
152 u32 tags[CAKE_QUEUES]; /* for set association */
153 u16 overflow_idx[CAKE_QUEUES];
154 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
157 struct cobalt_params cparams;
160 u16 sparse_flow_count;
161 u16 decaying_flow_count;
162 u16 unresponsive_flow_count;
166 struct list_head new_flows;
167 struct list_head old_flows;
168 struct list_head decaying_flows;
170 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
171 ktime_t time_next_packet;
176 u16 tin_quantum_prio;
177 u16 tin_quantum_band;
188 /* moving averages */
193 /* hash function stats */
198 }; /* number of tins is small, so size of this struct doesn't matter much */
200 struct cake_sched_data {
201 struct tcf_proto __rcu *filter_list; /* optional external classifier */
202 struct tcf_block *block;
203 struct cake_tin_data *tins;
205 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
206 u16 overflow_timeout;
214 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
216 ktime_t time_next_packet;
217 ktime_t failsafe_next_packet;
226 /* resource tracking */
230 u32 buffer_config_limit;
232 /* indices for dequeue */
236 struct qdisc_watchdog watchdog;
240 /* bandwidth capacity estimate */
241 ktime_t last_packet_time;
242 ktime_t avg_window_begin;
243 u64 avg_packet_interval;
244 u64 avg_window_bytes;
245 u64 avg_peak_bandwidth;
246 ktime_t last_reconfig_time;
248 /* packet length stats */
257 CAKE_FLAG_OVERHEAD = BIT(0),
258 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
259 CAKE_FLAG_INGRESS = BIT(2),
260 CAKE_FLAG_WASH = BIT(3),
261 CAKE_FLAG_SPLIT_GSO = BIT(4)
264 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
265 * obtain the best features of each. Codel is excellent on flows which
266 * respond to congestion signals in a TCP-like way. BLUE is more effective on
267 * unresponsive flows.
270 struct cobalt_skb_cb {
271 ktime_t enqueue_time;
275 static u64 us_to_ns(u64 us)
277 return us * NSEC_PER_USEC;
280 static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
282 qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
283 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
286 static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
288 return get_cobalt_cb(skb)->enqueue_time;
291 static void cobalt_set_enqueue_time(struct sk_buff *skb,
294 get_cobalt_cb(skb)->enqueue_time = now;
297 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
299 /* Diffserv lookup tables */
301 static const u8 precedence[] = {
302 0, 0, 0, 0, 0, 0, 0, 0,
303 1, 1, 1, 1, 1, 1, 1, 1,
304 2, 2, 2, 2, 2, 2, 2, 2,
305 3, 3, 3, 3, 3, 3, 3, 3,
306 4, 4, 4, 4, 4, 4, 4, 4,
307 5, 5, 5, 5, 5, 5, 5, 5,
308 6, 6, 6, 6, 6, 6, 6, 6,
309 7, 7, 7, 7, 7, 7, 7, 7,
312 static const u8 diffserv8[] = {
313 2, 5, 1, 2, 4, 2, 2, 2,
314 0, 2, 1, 2, 1, 2, 1, 2,
315 5, 2, 4, 2, 4, 2, 4, 2,
316 3, 2, 3, 2, 3, 2, 3, 2,
317 6, 2, 3, 2, 3, 2, 3, 2,
318 6, 2, 2, 2, 6, 2, 6, 2,
319 7, 2, 2, 2, 2, 2, 2, 2,
320 7, 2, 2, 2, 2, 2, 2, 2,
323 static const u8 diffserv4[] = {
324 0, 2, 0, 0, 2, 0, 0, 0,
325 1, 0, 0, 0, 0, 0, 0, 0,
326 2, 0, 2, 0, 2, 0, 2, 0,
327 2, 0, 2, 0, 2, 0, 2, 0,
328 3, 0, 2, 0, 2, 0, 2, 0,
329 3, 0, 0, 0, 3, 0, 3, 0,
330 3, 0, 0, 0, 0, 0, 0, 0,
331 3, 0, 0, 0, 0, 0, 0, 0,
334 static const u8 diffserv3[] = {
335 0, 0, 0, 0, 2, 0, 0, 0,
336 1, 0, 0, 0, 0, 0, 0, 0,
337 0, 0, 0, 0, 0, 0, 0, 0,
338 0, 0, 0, 0, 0, 0, 0, 0,
339 0, 0, 0, 0, 0, 0, 0, 0,
340 0, 0, 0, 0, 2, 0, 2, 0,
341 2, 0, 0, 0, 0, 0, 0, 0,
342 2, 0, 0, 0, 0, 0, 0, 0,
345 static const u8 besteffort[] = {
346 0, 0, 0, 0, 0, 0, 0, 0,
347 0, 0, 0, 0, 0, 0, 0, 0,
348 0, 0, 0, 0, 0, 0, 0, 0,
349 0, 0, 0, 0, 0, 0, 0, 0,
350 0, 0, 0, 0, 0, 0, 0, 0,
351 0, 0, 0, 0, 0, 0, 0, 0,
352 0, 0, 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0, 0, 0,
356 /* tin priority order for stats dumping */
358 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
359 static const u8 bulk_order[] = {1, 0, 2, 3};
361 #define REC_INV_SQRT_CACHE (16)
362 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
364 /* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
365 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
367 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
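/* Worked example (illustrative): with count == 4 and rec_inv_sqrt == 0x80000000
 * (0.5 in Q0.32), invsqrt^2 == 0.25, so (3 - count * invsqrt^2) == 2.0 and the
 * step yields (0.5 / 2) * 2.0 == 0.5 -- i.e. 1/sqrt(4) is already a fixed
 * point of the iteration, as expected.
 */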
370 static void cobalt_newton_step(struct cobalt_vars *vars)
372 u32 invsqrt, invsqrt2;
375 invsqrt = vars->rec_inv_sqrt;
376 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
377 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
379 val >>= 2; /* avoid overflow in following multiply */
380 val = (val * invsqrt) >> (32 - 2 + 1);
382 vars->rec_inv_sqrt = val;
385 static void cobalt_invsqrt(struct cobalt_vars *vars)
387 if (vars->count < REC_INV_SQRT_CACHE)
388 vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
390 cobalt_newton_step(vars);
393 /* There is a big difference in timing between the accurate values placed in
394 * the cache and the approximations given by a single Newton step for small
395 * count values, particularly when stepping from count 1 to 2 or vice versa.
396 * Above 16, a single Newton step gives sufficient accuracy in either
397 * direction, given the precision stored.
399 * The magnitude of the error when stepping up to count 2 is such as to give
400 * the value that *should* have been produced at count 4.
403 static void cobalt_cache_init(void)
405 struct cobalt_vars v;
407 memset(&v, 0, sizeof(v));
408 v.rec_inv_sqrt = ~0U;
409 cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
411 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
412 cobalt_newton_step(&v);
413 cobalt_newton_step(&v);
414 cobalt_newton_step(&v);
415 cobalt_newton_step(&v);
417 cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
421 static void cobalt_vars_init(struct cobalt_vars *vars)
423 memset(vars, 0, sizeof(*vars));
425 if (!cobalt_rec_inv_sqrt_cache[0]) {
427 cobalt_rec_inv_sqrt_cache[0] = ~0;
431 /* CoDel control_law is t + interval/sqrt(count)
432 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
433 * both sqrt() and divide operation.
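/* Example (illustrative): with interval == 100 ms and count == 4,
 * rec_inv_sqrt encodes 1/sqrt(4) == 0.5 in Q0.32, so
 * reciprocal_scale(interval, rec_inv_sqrt) is ~50 ms and the next signalling
 * opportunity lands interval/sqrt(count) after 't'.
 */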
435 static ktime_t cobalt_control(ktime_t t,
439 return ktime_add_ns(t, reciprocal_scale(interval,
443 /* Call this when a packet had to be dropped due to queue overflow. Returns
444 * true if the BLUE state was quiescent before but active after this call.
446 static bool cobalt_queue_full(struct cobalt_vars *vars,
447 struct cobalt_params *p,
452 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
454 vars->p_drop += p->p_inc;
455 if (vars->p_drop < p->p_inc)
457 vars->blue_timer = now;
459 vars->dropping = true;
460 vars->drop_next = now;
467 /* Call this when the queue was serviced but turned out to be empty. Returns
468 * true if the BLUE state was active before but quiescent after this call.
470 static bool cobalt_queue_empty(struct cobalt_vars *vars,
471 struct cobalt_params *p,
477 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
478 if (vars->p_drop < p->p_dec)
481 vars->p_drop -= p->p_dec;
482 vars->blue_timer = now;
483 down = !vars->p_drop;
485 vars->dropping = false;
487 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
489 cobalt_invsqrt(vars);
490 vars->drop_next = cobalt_control(vars->drop_next,
498 /* Call this with a freshly dequeued packet for possible congestion marking.
499 * Returns true as an instruction to drop the packet, false for delivery.
501 static bool cobalt_should_drop(struct cobalt_vars *vars,
502 struct cobalt_params *p,
507 bool next_due, over_target, drop = false;
511 /* The 'schedule' variable records, in its sign, whether 'now' is before or
512 * after 'drop_next'. This allows 'drop_next' to be updated before the next
513 * scheduling decision is actually branched, without destroying that
514 * information. Similarly, the first 'schedule' value calculated is preserved
515 * in the boolean 'next_due'.
517 * As for 'drop_next', we take advantage of the fact that 'interval' is both
518 * the delay between first exceeding 'target' and the first signalling event,
519 * *and* the scaling factor for the signalling frequency. It's therefore very
520 * natural to use a single mechanism for both purposes, and eliminates a
521 * significant amount of reference Codel's spaghetti code. To help with this,
522 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
523 * as possible to 1.0 in fixed-point.
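/* Example (illustrative): if now is 2 ms past drop_next, schedule is positive
 * and next_due is true (given count > 0); if now is 3 ms before drop_next,
 * schedule is negative and no congestion signal is due yet.
 */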
526 sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
527 schedule = ktime_sub(now, vars->drop_next);
528 over_target = sojourn > p->target &&
529 sojourn > p->mtu_time * bulk_flows * 2 &&
530 sojourn > p->mtu_time * 4;
531 next_due = vars->count && ktime_to_ns(schedule) >= 0;
533 vars->ecn_marked = false;
536 if (!vars->dropping) {
537 vars->dropping = true;
538 vars->drop_next = cobalt_control(now,
544 } else if (vars->dropping) {
545 vars->dropping = false;
548 if (next_due && vars->dropping) {
549 /* Use ECN mark if possible, otherwise drop */
550 drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
555 cobalt_invsqrt(vars);
556 vars->drop_next = cobalt_control(vars->drop_next,
559 schedule = ktime_sub(now, vars->drop_next);
563 cobalt_invsqrt(vars);
564 vars->drop_next = cobalt_control(vars->drop_next,
567 schedule = ktime_sub(now, vars->drop_next);
568 next_due = vars->count && ktime_to_ns(schedule) >= 0;
572 /* Simple BLUE implementation. Lack of ECN is deliberate. */
574 drop |= (prandom_u32() < vars->p_drop);
576 /* Overload the drop_next field as an activity timeout */
578 vars->drop_next = ktime_add_ns(now, p->interval);
579 else if (ktime_to_ns(schedule) > 0 && !drop)
580 vars->drop_next = now;
585 static void cake_update_flowkeys(struct flow_keys *keys,
586 const struct sk_buff *skb)
588 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
589 struct nf_conntrack_tuple tuple = {};
590 bool rev = !skb->_nfct;
592 if (skb_protocol(skb, true) != htons(ETH_P_IP))
595 if (!nf_ct_get_tuple_skb(&tuple, skb))
598 keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
599 keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
601 if (keys->ports.ports) {
602 keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
603 keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
608 /* Cake's flow_mode has several subtle multi-bit settings; the dual-src/dst
609 * checks below also match when triple-isolate mode (both bits set) is used.
612 static bool cake_dsrc(int flow_mode)
614 return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
617 static bool cake_ddst(int flow_mode)
619 return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
622 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
623 int flow_mode, u16 flow_override, u16 host_override)
625 u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
626 u16 reduced_hash, srchost_idx, dsthost_idx;
627 struct flow_keys keys, host_keys;
629 if (unlikely(flow_mode == CAKE_FLOW_NONE))
632 /* If both overrides are set we can skip packet dissection entirely */
633 if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
634 (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
637 skb_flow_dissect_flow_keys(skb, &keys,
638 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
640 if (flow_mode & CAKE_FLOW_NAT_FLAG)
641 cake_update_flowkeys(&keys, skb);
643 /* flow_hash_from_keys() sorts the addresses by value, so we have
644 * to preserve their order in a separate data structure to treat
645 * src and dst host addresses as independently selectable.
648 host_keys.ports.ports = 0;
649 host_keys.basic.ip_proto = 0;
650 host_keys.keyid.keyid = 0;
651 host_keys.tags.flow_label = 0;
653 switch (host_keys.control.addr_type) {
654 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
655 host_keys.addrs.v4addrs.src = 0;
656 dsthost_hash = flow_hash_from_keys(&host_keys);
657 host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
658 host_keys.addrs.v4addrs.dst = 0;
659 srchost_hash = flow_hash_from_keys(&host_keys);
662 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
663 memset(&host_keys.addrs.v6addrs.src, 0,
664 sizeof(host_keys.addrs.v6addrs.src));
665 dsthost_hash = flow_hash_from_keys(&host_keys);
666 host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
667 memset(&host_keys.addrs.v6addrs.dst, 0,
668 sizeof(host_keys.addrs.v6addrs.dst));
669 srchost_hash = flow_hash_from_keys(&host_keys);
677 /* This *must* be after the above switch, since as a
678 * side-effect it sorts the src and dst addresses.
680 if (flow_mode & CAKE_FLOW_FLOWS)
681 flow_hash = flow_hash_from_keys(&keys);
685 flow_hash = flow_override - 1;
687 dsthost_hash = host_override - 1;
688 srchost_hash = host_override - 1;
691 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
692 if (flow_mode & CAKE_FLOW_SRC_IP)
693 flow_hash ^= srchost_hash;
695 if (flow_mode & CAKE_FLOW_DST_IP)
696 flow_hash ^= dsthost_hash;
699 reduced_hash = flow_hash % CAKE_QUEUES;
701 /* set-associative hashing */
702 /* fast path if no hash collision (direct lookup succeeds) */
703 if (likely(q->tags[reduced_hash] == flow_hash &&
704 q->flows[reduced_hash].set)) {
707 u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
708 u32 outer_hash = reduced_hash - inner_hash;
709 bool allocate_src = false;
710 bool allocate_dst = false;
713 /* check if any active queue in the set is reserved for
716 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
717 i++, k = (k + 1) % CAKE_SET_WAYS) {
718 if (q->tags[outer_hash + k] == flow_hash) {
722 if (!q->flows[outer_hash + k].set) {
723 /* need to increment host refcnts */
724 allocate_src = cake_dsrc(flow_mode);
725 allocate_dst = cake_ddst(flow_mode);
732 /* no queue is reserved for this flow, look for an
735 for (i = 0; i < CAKE_SET_WAYS;
736 i++, k = (k + 1) % CAKE_SET_WAYS) {
737 if (!q->flows[outer_hash + k].set) {
739 allocate_src = cake_dsrc(flow_mode);
740 allocate_dst = cake_ddst(flow_mode);
745 /* With no empty queues, default to the original
746 * queue, accept the collision, update the host tags.
749 q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
750 q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
751 allocate_src = cake_dsrc(flow_mode);
752 allocate_dst = cake_ddst(flow_mode);
754 /* reserve queue for future packets in same flow */
755 reduced_hash = outer_hash + k;
756 q->tags[reduced_hash] = flow_hash;
759 srchost_idx = srchost_hash % CAKE_QUEUES;
760 inner_hash = srchost_idx % CAKE_SET_WAYS;
761 outer_hash = srchost_idx - inner_hash;
762 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
763 i++, k = (k + 1) % CAKE_SET_WAYS) {
764 if (q->hosts[outer_hash + k].srchost_tag ==
768 for (i = 0; i < CAKE_SET_WAYS;
769 i++, k = (k + 1) % CAKE_SET_WAYS) {
770 if (!q->hosts[outer_hash + k].srchost_refcnt)
773 q->hosts[outer_hash + k].srchost_tag = srchost_hash;
775 srchost_idx = outer_hash + k;
776 q->hosts[srchost_idx].srchost_refcnt++;
777 q->flows[reduced_hash].srchost = srchost_idx;
781 dsthost_idx = dsthost_hash % CAKE_QUEUES;
782 inner_hash = dsthost_idx % CAKE_SET_WAYS;
783 outer_hash = dsthost_idx - inner_hash;
784 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
785 i++, k = (k + 1) % CAKE_SET_WAYS) {
786 if (q->hosts[outer_hash + k].dsthost_tag ==
790 for (i = 0; i < CAKE_SET_WAYS;
791 i++, k = (k + 1) % CAKE_SET_WAYS) {
792 if (!q->hosts[outer_hash + k].dsthost_refcnt)
795 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
797 dsthost_idx = outer_hash + k;
798 q->hosts[dsthost_idx].dsthost_refcnt++;
799 q->flows[reduced_hash].dsthost = dsthost_idx;
806 /* helper functions: might be changed when/if skb uses a standard list_head */
807 /* remove one skb from head of slot queue */
809 static struct sk_buff *dequeue_head(struct cake_flow *flow)
811 struct sk_buff *skb = flow->head;
814 flow->head = skb->next;
821 /* add skb to flow queue (tail add) */
823 static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
828 flow->tail->next = skb;
833 static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
836 unsigned int offset = skb_network_offset(skb);
839 iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
844 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
845 return skb_header_pointer(skb, offset + iph->ihl * 4,
846 sizeof(struct ipv6hdr), buf);
848 else if (iph->version == 4)
851 else if (iph->version == 6)
852 return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
858 static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
859 void *buf, unsigned int bufsize)
861 unsigned int offset = skb_network_offset(skb);
862 const struct ipv6hdr *ipv6h;
863 const struct tcphdr *tcph;
864 const struct iphdr *iph;
865 struct ipv6hdr _ipv6h;
868 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
873 if (ipv6h->version == 4) {
874 iph = (struct iphdr *)ipv6h;
875 offset += iph->ihl * 4;
877 /* special-case 6in4 tunnelling, as that is a common way to get
878 * v6 connectivity in the home
880 if (iph->protocol == IPPROTO_IPV6) {
881 ipv6h = skb_header_pointer(skb, offset,
882 sizeof(_ipv6h), &_ipv6h);
884 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
887 offset += sizeof(struct ipv6hdr);
889 } else if (iph->protocol != IPPROTO_TCP) {
893 } else if (ipv6h->version == 6) {
894 if (ipv6h->nexthdr != IPPROTO_TCP)
897 offset += sizeof(struct ipv6hdr);
902 tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
903 if (!tcph || tcph->doff < 5)
906 return skb_header_pointer(skb, offset,
907 min(__tcp_hdrlen(tcph), bufsize), buf);
910 static const void *cake_get_tcpopt(const struct tcphdr *tcph,
911 int code, int *oplen)
913 /* inspired by tcp_parse_options in tcp_input.c */
914 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
915 const u8 *ptr = (const u8 *)(tcph + 1);
921 if (opcode == TCPOPT_EOL)
923 if (opcode == TCPOPT_NOP) {
930 if (opsize < 2 || opsize > length)
933 if (opcode == code) {
945 /* Compare two SACK sequences. A sequence is considered greater if it SACKs more
946 * bytes than the other. In the case where both sequences ACK bytes that the
947 * other doesn't, A is considered greater. DSACKs in A also make A
948 * considered greater.
950 * @return -1, 0 or 1 as normal compare functions
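/* Example (illustrative): if A SACKs 1000-2000 and B SACKs 1000-3000, every
 * range in A is covered by B and B covers more bytes, so B is reported as
 * greater (1); if A additionally SACKed a range B lacks, A would be reported
 * as greater (-1) and the caller would decline to drop it.
 */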
952 static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
953 const struct tcphdr *tcph_b)
955 const struct tcp_sack_block_wire *sack_a, *sack_b;
956 u32 ack_seq_a = ntohl(tcph_a->ack_seq);
957 u32 bytes_a = 0, bytes_b = 0;
958 int oplen_a, oplen_b;
961 sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
962 sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
964 /* pointers point to option contents */
965 oplen_a -= TCPOLEN_SACK_BASE;
966 oplen_b -= TCPOLEN_SACK_BASE;
968 if (sack_a && oplen_a >= sizeof(*sack_a) &&
969 (!sack_b || oplen_b < sizeof(*sack_b)))
971 else if (sack_b && oplen_b >= sizeof(*sack_b) &&
972 (!sack_a || oplen_a < sizeof(*sack_a)))
974 else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
975 (!sack_b || oplen_b < sizeof(*sack_b)))
978 while (oplen_a >= sizeof(*sack_a)) {
979 const struct tcp_sack_block_wire *sack_tmp = sack_b;
980 u32 start_a = get_unaligned_be32(&sack_a->start_seq);
981 u32 end_a = get_unaligned_be32(&sack_a->end_seq);
982 int oplen_tmp = oplen_b;
985 /* DSACK; always considered greater to prevent dropping */
986 if (before(start_a, ack_seq_a))
989 bytes_a += end_a - start_a;
991 while (oplen_tmp >= sizeof(*sack_tmp)) {
992 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
993 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
995 /* first time through we count the total size */
997 bytes_b += end_b - start_b;
999 if (!after(start_b, start_a) && !before(end_b, end_a)) {
1004 oplen_tmp -= sizeof(*sack_tmp);
1011 oplen_a -= sizeof(*sack_a);
1016 /* If we made it this far, all ranges SACKed by A are covered by B, so
1017 * either the SACKs are equal, or B SACKs more bytes.
1019 return bytes_b > bytes_a ? 1 : 0;
1022 static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
1023 u32 *tsval, u32 *tsecr)
1028 ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
1030 if (ptr && opsize == TCPOLEN_TIMESTAMP) {
1031 *tsval = get_unaligned_be32(ptr);
1032 *tsecr = get_unaligned_be32(ptr + 4);
1036 static bool cake_tcph_may_drop(const struct tcphdr *tcph,
1037 u32 tstamp_new, u32 tsecr_new)
1039 /* inspired by tcp_parse_options in tcp_input.c */
1040 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
1041 const u8 *ptr = (const u8 *)(tcph + 1);
1044 /* 3 reserved flags must be unset to avoid future breakage
1046 * ECE/CWR are handled separately
1047 * All other flags URG/PSH/RST/SYN/FIN must be unset
1048 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
1049 * 0x00C00000 = CWR/ECE (handled separately)
1050 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
1052 if (((tcp_flag_word(tcph) &
1053 cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
1056 while (length > 0) {
1057 int opcode = *ptr++;
1060 if (opcode == TCPOPT_EOL)
1062 if (opcode == TCPOPT_NOP) {
1069 if (opsize < 2 || opsize > length)
1073 case TCPOPT_MD5SIG: /* doesn't influence state */
1076 case TCPOPT_SACK: /* stricter checking performed later */
1077 if (opsize % 8 != 2)
1081 case TCPOPT_TIMESTAMP:
1082 /* only drop timestamps lower than new */
1083 if (opsize != TCPOLEN_TIMESTAMP)
1085 tstamp = get_unaligned_be32(ptr);
1086 tsecr = get_unaligned_be32(ptr + 4);
1087 if (after(tstamp, tstamp_new) ||
1088 after(tsecr, tsecr_new))
1092 case TCPOPT_MSS: /* these should only be set on SYN */
1094 case TCPOPT_SACK_PERM:
1095 case TCPOPT_FASTOPEN:
1097 default: /* don't drop if any unknown options are present */
1108 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
1109 struct cake_flow *flow)
1111 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
1112 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
1113 struct sk_buff *skb_check, *skb_prev = NULL;
1114 const struct ipv6hdr *ipv6h, *ipv6h_check;
1115 unsigned char _tcph[64], _tcph_check[64];
1116 const struct tcphdr *tcph, *tcph_check;
1117 const struct iphdr *iph, *iph_check;
1118 struct ipv6hdr _iph, _iph_check;
1119 const struct sk_buff *skb;
1120 int seglen, num_found = 0;
1121 u32 tstamp = 0, tsecr = 0;
1122 __be32 elig_flags = 0;
1125 /* no other possible ACKs to filter */
1126 if (flow->head == flow->tail)
1130 tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
1131 iph = cake_get_iphdr(skb, &_iph);
1135 cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
1137 /* the 'triggering' packet need only have the ACK flag set.
1138 * also check that SYN is not set, as there won't be any previous ACKs.
1140 if ((tcp_flag_word(tcph) &
1141 (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
1144 /* the 'triggering' ACK is at the tail of the queue, we have already
1145 * returned if it is the only packet in the flow. loop through the rest
1146 * of the queue looking for pure ACKs with the same 5-tuple as the
1149 for (skb_check = flow->head;
1150 skb_check && skb_check != skb;
1151 skb_prev = skb_check, skb_check = skb_check->next) {
1152 iph_check = cake_get_iphdr(skb_check, &_iph_check);
1153 tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
1154 sizeof(_tcph_check));
1156 /* only TCP packets with matching 5-tuple are eligible, and only
1159 if (!tcph_check || iph->version != iph_check->version ||
1160 tcph_check->source != tcph->source ||
1161 tcph_check->dest != tcph->dest)
1164 if (iph_check->version == 4) {
1165 if (iph_check->saddr != iph->saddr ||
1166 iph_check->daddr != iph->daddr)
1169 seglen = ntohs(iph_check->tot_len) -
1170 (4 * iph_check->ihl);
1171 } else if (iph_check->version == 6) {
1172 ipv6h = (struct ipv6hdr *)iph;
1173 ipv6h_check = (struct ipv6hdr *)iph_check;
1175 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
1176 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
1179 seglen = ntohs(ipv6h_check->payload_len);
1181 WARN_ON(1); /* shouldn't happen */
1185 /* If the ECE/CWR flags changed from the previous eligible
1186 * packet in the same flow, we should no longer be dropping that
1187 * previous packet as this would lose information.
1189 if (elig_ack && (tcp_flag_word(tcph_check) &
1190 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
1192 elig_ack_prev = NULL;
1196 /* Check TCP options and flags, don't drop ACKs with segment
1197 * data, and don't drop ACKs with a higher cumulative ACK
1198 * counter than the triggering packet. Check ACK seqno here to
1199 * avoid parsing SACK options of packets we are going to exclude
1202 if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
1203 (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
1204 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
1207 /* Check SACK options. The triggering packet must SACK more data
1208 * than the ACK under consideration, or SACK the same range but
1209 * have a larger cumulative ACK counter. The latter is a
1210 * pathological case, but is contained in the following check
1211 * anyway, just to be safe.
1213 sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
1215 if (sack_comp < 0 ||
1216 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
1220 /* At this point we have found an eligible pure ACK to drop; if
1221 * we are in aggressive mode, we are done. Otherwise, keep
1222 * searching unless this is the second eligible ACK we
1225 * Since we want to drop ACK closest to the head of the queue,
1226 * save the first eligible ACK we find, even if we need to loop
1230 elig_ack = skb_check;
1231 elig_ack_prev = skb_prev;
1232 elig_flags = (tcp_flag_word(tcph_check)
1233 & (TCP_FLAG_ECE | TCP_FLAG_CWR));
1236 if (num_found++ > 0)
1240 /* We made it through the queue without finding two eligible ACKs. If
1241 * we found a single eligible ACK we can drop it in aggressive mode if
1242 * we can guarantee that this does not interfere with ECN flag
1243 * information. We ensure this by dropping it only if the enqueued
1244 * packet is consecutive with the eligible ACK, and their flags match.
1246 if (elig_ack && aggressive && elig_ack->next == skb &&
1247 (elig_flags == (tcp_flag_word(tcph) &
1248 (TCP_FLAG_ECE | TCP_FLAG_CWR))))
1255 elig_ack_prev->next = elig_ack->next;
1257 flow->head = elig_ack->next;
1259 elig_ack->next = NULL;
1264 static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
1266 avg -= avg >> shift;
1267 avg += sample >> shift;
1271 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
1273 if (q->rate_flags & CAKE_FLAG_OVERHEAD)
1276 if (q->max_netlen < len)
1277 q->max_netlen = len;
1278 if (q->min_netlen > len)
1279 q->min_netlen = len;
1281 len += q->rate_overhead;
1283 if (len < q->rate_mpu)
1286 if (q->atm_mode == CAKE_ATM_ATM) {
1290 } else if (q->atm_mode == CAKE_ATM_PTM) {
1291 /* Add one byte per 64 bytes or part thereof.
1292 * This is conservative and easier to calculate than the
1295 len += (len + 63) / 64;
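/* e.g. (illustrative) a 1500-byte len gains (1500 + 63) / 64 == 24 bytes
 * here, approximating PTM's 64/65 octet framing.
 */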
1298 if (q->max_adjlen < len)
1299 q->max_adjlen = len;
1300 if (q->min_adjlen > len)
1301 q->min_adjlen = len;
1306 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
1308 const struct skb_shared_info *shinfo = skb_shinfo(skb);
1309 unsigned int hdr_len, last_len = 0;
1310 u32 off = skb_network_offset(skb);
1311 u32 len = qdisc_pkt_len(skb);
1314 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
1316 if (!shinfo->gso_size)
1317 return cake_calc_overhead(q, len, off);
1319 /* borrowed from qdisc_pkt_len_init() */
1320 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1322 /* + transport layer */
1323 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
1325 const struct tcphdr *th;
1326 struct tcphdr _tcphdr;
1328 th = skb_header_pointer(skb, skb_transport_offset(skb),
1329 sizeof(_tcphdr), &_tcphdr);
1331 hdr_len += __tcp_hdrlen(th);
1333 struct udphdr _udphdr;
1335 if (skb_header_pointer(skb, skb_transport_offset(skb),
1336 sizeof(_udphdr), &_udphdr))
1337 hdr_len += sizeof(struct udphdr);
1340 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
1341 segs = DIV_ROUND_UP(skb->len - hdr_len,
1344 segs = shinfo->gso_segs;
1346 len = shinfo->gso_size + hdr_len;
1347 last_len = skb->len - shinfo->gso_size * (segs - 1);
1349 return (cake_calc_overhead(q, len, off) * (segs - 1) +
1350 cake_calc_overhead(q, last_len, off));
1353 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
1355 struct cake_heap_entry ii = q->overflow_heap[i];
1356 struct cake_heap_entry jj = q->overflow_heap[j];
1358 q->overflow_heap[i] = jj;
1359 q->overflow_heap[j] = ii;
1361 q->tins[ii.t].overflow_idx[ii.b] = j;
1362 q->tins[jj.t].overflow_idx[jj.b] = i;
1365 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
1367 struct cake_heap_entry ii = q->overflow_heap[i];
1369 return q->tins[ii.t].backlogs[ii.b];
1372 static void cake_heapify(struct cake_sched_data *q, u16 i)
1374 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
1375 u32 mb = cake_heap_get_backlog(q, i);
1383 u32 lb = cake_heap_get_backlog(q, l);
1392 u32 rb = cake_heap_get_backlog(q, r);
1401 cake_heap_swap(q, i, m);
1409 static void cake_heapify_up(struct cake_sched_data *q, u16 i)
1411 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
1412 u16 p = (i - 1) >> 1;
1413 u32 ib = cake_heap_get_backlog(q, i);
1414 u32 pb = cake_heap_get_backlog(q, p);
1417 cake_heap_swap(q, i, p);
1425 static int cake_advance_shaper(struct cake_sched_data *q,
1426 struct cake_tin_data *b,
1427 struct sk_buff *skb,
1428 ktime_t now, bool drop)
1430 u32 len = get_cobalt_cb(skb)->adjusted_len;
1432 /* charge packet bandwidth to this tin
1433 * and to the global shaper.
1436 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1437 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1438 u64 failsafe_dur = global_dur + (global_dur >> 1);
1440 if (ktime_before(b->time_next_packet, now))
1441 b->time_next_packet = ktime_add_ns(b->time_next_packet,
1444 else if (ktime_before(b->time_next_packet,
1445 ktime_add_ns(now, tin_dur)))
1446 b->time_next_packet = ktime_add_ns(now, tin_dur);
1448 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1451 q->failsafe_next_packet = \
1452 ktime_add_ns(q->failsafe_next_packet,
1458 static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1460 struct cake_sched_data *q = qdisc_priv(sch);
1461 ktime_t now = ktime_get();
1462 u32 idx = 0, tin = 0, len;
1463 struct cake_heap_entry qq;
1464 struct cake_tin_data *b;
1465 struct cake_flow *flow;
1466 struct sk_buff *skb;
1468 if (!q->overflow_timeout) {
1470 /* Build fresh max-heap */
1471 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1474 q->overflow_timeout = 65535;
1476 /* select longest queue for pruning */
1477 qq = q->overflow_heap[0];
1482 flow = &b->flows[idx];
1483 skb = dequeue_head(flow);
1484 if (unlikely(!skb)) {
1485 /* heap has gone wrong, rebuild it next time */
1486 q->overflow_timeout = 0;
1487 return idx + (tin << 16);
1490 if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1491 b->unresponsive_flow_count++;
1493 len = qdisc_pkt_len(skb);
1494 q->buffer_used -= skb->truesize;
1495 b->backlogs[idx] -= len;
1496 b->tin_backlog -= len;
1497 sch->qstats.backlog -= len;
1498 qdisc_tree_reduce_backlog(sch, 1, len);
1502 sch->qstats.drops++;
1504 if (q->rate_flags & CAKE_FLAG_INGRESS)
1505 cake_advance_shaper(q, b, skb, now, true);
1507 __qdisc_drop(skb, to_free);
1512 return idx + (tin << 16);
1515 static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
1517 const int offset = skb_network_offset(skb);
1521 switch (skb_protocol(skb, true)) {
1522 case htons(ETH_P_IP):
1523 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1527 /* ToS is in the second byte of iphdr */
1528 dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
1531 const int wlen = offset + sizeof(struct iphdr);
1533 if (!pskb_may_pull(skb, wlen) ||
1534 skb_try_make_writable(skb, wlen))
1537 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1542 case htons(ETH_P_IPV6):
1543 buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
1547 /* Traffic class is in the first and second bytes of ipv6hdr */
1548 dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
1551 const int wlen = offset + sizeof(struct ipv6hdr);
1553 if (!pskb_may_pull(skb, wlen) ||
1554 skb_try_make_writable(skb, wlen))
1557 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1562 case htons(ETH_P_ARP):
1563 return 0x38; /* CS7 - Net Control */
1566 /* If there is no Diffserv field, treat as best-effort */
1571 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1572 struct sk_buff *skb)
1574 struct cake_sched_data *q = qdisc_priv(sch);
1579 /* Tin selection: Default to diffserv-based selection, allow overriding
1580 * using firewall marks or skb->priority. Call DSCP parsing early if
1581 * wash is enabled, otherwise defer to below to skip unneeded parsing.
1583 wash = !!(q->rate_flags & CAKE_FLAG_WASH);
1585 dscp = cake_handle_diffserv(skb, wash);
1587 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
1590 else if (TC_H_MAJ(skb->priority) == sch->handle &&
1591 TC_H_MIN(skb->priority) > 0 &&
1592 TC_H_MIN(skb->priority) <= q->tin_cnt)
1593 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
1597 dscp = cake_handle_diffserv(skb, wash);
1598 tin = q->tin_index[dscp];
1600 if (unlikely(tin >= q->tin_cnt))
1604 return &q->tins[tin];
1607 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1608 struct sk_buff *skb, int flow_mode, int *qerr)
1610 struct cake_sched_data *q = qdisc_priv(sch);
1611 struct tcf_proto *filter;
1612 struct tcf_result res;
1613 u16 flow = 0, host = 0;
1616 filter = rcu_dereference_bh(q->filter_list);
1620 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1621 result = tcf_classify(skb, filter, &res, false);
1624 #ifdef CONFIG_NET_CLS_ACT
1629 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1635 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1636 flow = TC_H_MIN(res.classid);
1637 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1638 host = TC_H_MAJ(res.classid) >> 16;
1641 *t = cake_select_tin(sch, skb);
1642 return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1645 static void cake_reconfigure(struct Qdisc *sch);
1647 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1648 struct sk_buff **to_free)
1650 struct cake_sched_data *q = qdisc_priv(sch);
1651 int len = qdisc_pkt_len(skb);
1652 int uninitialized_var(ret);
1653 struct sk_buff *ack = NULL;
1654 ktime_t now = ktime_get();
1655 struct cake_tin_data *b;
1656 struct cake_flow *flow;
1659 /* choose flow to insert into */
1660 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
1662 if (ret & __NET_XMIT_BYPASS)
1663 qdisc_qstats_drop(sch);
1664 __qdisc_drop(skb, to_free);
1668 flow = &b->flows[idx];
1670 /* ensure shaper state isn't stale */
1671 if (!b->tin_backlog) {
1672 if (ktime_before(b->time_next_packet, now))
1673 b->time_next_packet = now;
1676 if (ktime_before(q->time_next_packet, now)) {
1677 q->failsafe_next_packet = now;
1678 q->time_next_packet = now;
1679 } else if (ktime_after(q->time_next_packet, now) &&
1680 ktime_after(q->failsafe_next_packet, now)) {
1682 min(ktime_to_ns(q->time_next_packet),
1684 q->failsafe_next_packet));
1685 sch->qstats.overlimits++;
1686 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1691 if (unlikely(len > b->max_skblen))
1692 b->max_skblen = len;
1694 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1695 struct sk_buff *segs, *nskb;
1696 netdev_features_t features = netif_skb_features(skb);
1697 unsigned int slen = 0, numsegs = 0;
1699 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1700 if (IS_ERR_OR_NULL(segs))
1701 return qdisc_drop(skb, sch, to_free);
1706 qdisc_skb_cb(segs)->pkt_len = segs->len;
1707 cobalt_set_enqueue_time(segs, now);
1708 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
1710 flow_queue_add(flow, segs);
1715 q->buffer_used += segs->truesize;
1722 b->backlogs[idx] += slen;
1723 b->tin_backlog += slen;
1724 sch->qstats.backlog += slen;
1725 q->avg_window_bytes += slen;
1727 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1731 cobalt_set_enqueue_time(skb, now);
1732 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
1733 flow_queue_add(flow, skb);
1736 ack = cake_ack_filter(q, flow);
1740 sch->qstats.drops++;
1741 b->bytes += qdisc_pkt_len(ack);
1742 len -= qdisc_pkt_len(ack);
1743 q->buffer_used += skb->truesize - ack->truesize;
1744 if (q->rate_flags & CAKE_FLAG_INGRESS)
1745 cake_advance_shaper(q, b, ack, now, true);
1747 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1751 q->buffer_used += skb->truesize;
1757 b->backlogs[idx] += len;
1758 b->tin_backlog += len;
1759 sch->qstats.backlog += len;
1760 q->avg_window_bytes += len;
1763 if (q->overflow_timeout)
1764 cake_heapify_up(q, b->overflow_idx[idx]);
1766 /* incoming bandwidth capacity estimate */
1767 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1768 u64 packet_interval = \
1769 ktime_to_ns(ktime_sub(now, q->last_packet_time));
1771 if (packet_interval > NSEC_PER_SEC)
1772 packet_interval = NSEC_PER_SEC;
1774 /* filter out short-term bursts, eg. wifi aggregation */
1775 q->avg_packet_interval = \
1776 cake_ewma(q->avg_packet_interval,
1778 (packet_interval > q->avg_packet_interval ?
1781 q->last_packet_time = now;
1783 if (packet_interval > q->avg_packet_interval) {
1784 u64 window_interval = \
1785 ktime_to_ns(ktime_sub(now,
1786 q->avg_window_begin));
1787 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1789 b = div64_u64(b, window_interval);
1790 q->avg_peak_bandwidth =
1791 cake_ewma(q->avg_peak_bandwidth, b,
1792 b > q->avg_peak_bandwidth ? 2 : 8);
1793 q->avg_window_bytes = 0;
1794 q->avg_window_begin = now;
1796 if (ktime_after(now,
1797 ktime_add_ms(q->last_reconfig_time,
1799 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1800 cake_reconfigure(sch);
1804 q->avg_window_bytes = 0;
1805 q->last_packet_time = now;
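/* Worked example for the autorate estimator above (illustrative): if 125000
 * bytes arrived in a 10 ms measurement window, b computes to 12500000
 * bytes/sec; once folded into avg_peak_bandwidth, the shaper rate is set to
 * 15/16 of that estimate, keeping the bottleneck queue here rather than in
 * the upstream device.
 */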
1809 if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1810 struct cake_host *srchost = &b->hosts[flow->srchost];
1811 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1815 list_add_tail(&flow->flowchain, &b->new_flows);
1817 b->decaying_flow_count--;
1818 list_move_tail(&flow->flowchain, &b->new_flows);
1820 flow->set = CAKE_SET_SPARSE;
1821 b->sparse_flow_count++;
1823 if (cake_dsrc(q->flow_mode))
1824 host_load = max(host_load, srchost->srchost_refcnt);
1826 if (cake_ddst(q->flow_mode))
1827 host_load = max(host_load, dsthost->dsthost_refcnt);
1829 flow->deficit = (b->flow_quantum *
1830 quantum_div[host_load]) >> 16;
1831 } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1832 /* this flow was empty, accounted as a sparse flow, but actually
1833 * in the bulk rotation.
1835 flow->set = CAKE_SET_BULK;
1836 b->sparse_flow_count--;
1837 b->bulk_flow_count++;
1840 if (q->buffer_used > q->buffer_max_used)
1841 q->buffer_max_used = q->buffer_used;
1843 if (q->buffer_used > q->buffer_limit) {
1846 while (q->buffer_used > q->buffer_limit) {
1848 cake_drop(sch, to_free);
1850 b->drop_overlimit += dropped;
1852 return NET_XMIT_SUCCESS;
1855 static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1857 struct cake_sched_data *q = qdisc_priv(sch);
1858 struct cake_tin_data *b = &q->tins[q->cur_tin];
1859 struct cake_flow *flow = &b->flows[q->cur_flow];
1860 struct sk_buff *skb = NULL;
1864 skb = dequeue_head(flow);
1865 len = qdisc_pkt_len(skb);
1866 b->backlogs[q->cur_flow] -= len;
1867 b->tin_backlog -= len;
1868 sch->qstats.backlog -= len;
1869 q->buffer_used -= skb->truesize;
1872 if (q->overflow_timeout)
1873 cake_heapify(q, b->overflow_idx[q->cur_flow]);
1878 /* Discard leftover packets from a tin no longer in use. */
1879 static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1881 struct cake_sched_data *q = qdisc_priv(sch);
1882 struct sk_buff *skb;
1885 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1886 while (!!(skb = cake_dequeue_one(sch)))
1890 static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1892 struct cake_sched_data *q = qdisc_priv(sch);
1893 struct cake_tin_data *b = &q->tins[q->cur_tin];
1894 struct cake_host *srchost, *dsthost;
1895 ktime_t now = ktime_get();
1896 struct cake_flow *flow;
1897 struct list_head *head;
1898 bool first_flow = true;
1899 struct sk_buff *skb;
1908 /* global hard shaper */
1909 if (ktime_after(q->time_next_packet, now) &&
1910 ktime_after(q->failsafe_next_packet, now)) {
1911 u64 next = min(ktime_to_ns(q->time_next_packet),
1912 ktime_to_ns(q->failsafe_next_packet));
1914 sch->qstats.overlimits++;
1915 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1919 /* Choose a class to work on. */
1921 /* In unlimited mode, can't rely on shaper timings, just balance
1924 bool wrapped = false, empty = true;
1926 while (b->tin_deficit < 0 ||
1927 !(b->sparse_flow_count + b->bulk_flow_count)) {
1928 if (b->tin_deficit <= 0)
1929 b->tin_deficit += b->tin_quantum_band;
1930 if (b->sparse_flow_count + b->bulk_flow_count)
1935 if (q->cur_tin >= q->tin_cnt) {
1940 /* It's possible for q->qlen to be
1941 * nonzero when we actually have no
1952 /* In shaped mode, choose:
1953 * - Highest-priority tin with queue and meeting schedule, or
1954 * - The earliest-scheduled tin with queue.
1956 ktime_t best_time = KTIME_MAX;
1957 int tin, best_tin = 0;
1959 for (tin = 0; tin < q->tin_cnt; tin++) {
1961 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
1962 ktime_t time_to_pkt = \
1963 ktime_sub(b->time_next_packet, now);
1965 if (ktime_to_ns(time_to_pkt) <= 0 ||
1966 ktime_compare(time_to_pkt,
1968 best_time = time_to_pkt;
1974 q->cur_tin = best_tin;
1975 b = q->tins + best_tin;
1977 /* No point in going further if no packets to deliver. */
1978 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
1983 /* service this class */
1984 head = &b->decaying_flows;
1985 if (!first_flow || list_empty(head)) {
1986 head = &b->new_flows;
1987 if (list_empty(head)) {
1988 head = &b->old_flows;
1989 if (unlikely(list_empty(head))) {
1990 head = &b->decaying_flows;
1991 if (unlikely(list_empty(head)))
1996 flow = list_first_entry(head, struct cake_flow, flowchain);
1997 q->cur_flow = flow - b->flows;
2000 /* triple isolation (modified DRR++) */
2001 srchost = &b->hosts[flow->srchost];
2002 dsthost = &b->hosts[flow->dsthost];
2005 if (cake_dsrc(q->flow_mode))
2006 host_load = max(host_load, srchost->srchost_refcnt);
2008 if (cake_ddst(q->flow_mode))
2009 host_load = max(host_load, dsthost->dsthost_refcnt);
2011 WARN_ON(host_load > CAKE_QUEUES);
2013 /* flow isolation (DRR++) */
2014 if (flow->deficit <= 0) {
2015 /* The shifted prandom_u32() is a way to apply dithering to
2016 * avoid accumulating roundoff errors
2018 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
2019 (prandom_u32() >> 16)) >> 16;
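/* Example (illustrative), assuming quantum_div[n] is a reciprocal table of
 * roughly 65535/n filled at init (not shown here): with flow_quantum == 1514
 * and one active flow per host the increment is ~1514 bytes; with four flows
 * on the busiest host it drops to ~378 bytes, plus up to one byte of dither.
 */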
2020 list_move_tail(&flow->flowchain, &b->old_flows);
2022 /* Keep all flows with deficits out of the sparse and decaying
2023 * rotations. No non-empty flow can go into the decaying
2024 * rotation, so they can't get deficits
2026 if (flow->set == CAKE_SET_SPARSE) {
2028 b->sparse_flow_count--;
2029 b->bulk_flow_count++;
2030 flow->set = CAKE_SET_BULK;
2032 /* we've moved it to the bulk rotation for
2033 * correct deficit accounting but we still want
2034 * to count it as a sparse flow, not a bulk one.
2036 flow->set = CAKE_SET_SPARSE_WAIT;
2042 /* Retrieve a packet via the AQM */
2044 skb = cake_dequeue_one(sch);
2046 /* this queue was actually empty */
2047 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
2048 b->unresponsive_flow_count--;
2050 if (flow->cvars.p_drop || flow->cvars.count ||
2051 ktime_before(now, flow->cvars.drop_next)) {
2052 /* keep in the flowchain until the state has
2055 list_move_tail(&flow->flowchain,
2056 &b->decaying_flows);
2057 if (flow->set == CAKE_SET_BULK) {
2058 b->bulk_flow_count--;
2059 b->decaying_flow_count++;
2060 } else if (flow->set == CAKE_SET_SPARSE ||
2061 flow->set == CAKE_SET_SPARSE_WAIT) {
2062 b->sparse_flow_count--;
2063 b->decaying_flow_count++;
2065 flow->set = CAKE_SET_DECAYING;
2067 /* remove empty queue from the flowchain */
2068 list_del_init(&flow->flowchain);
2069 if (flow->set == CAKE_SET_SPARSE ||
2070 flow->set == CAKE_SET_SPARSE_WAIT)
2071 b->sparse_flow_count--;
2072 else if (flow->set == CAKE_SET_BULK)
2073 b->bulk_flow_count--;
2075 b->decaying_flow_count--;
2077 flow->set = CAKE_SET_NONE;
2078 srchost->srchost_refcnt--;
2079 dsthost->dsthost_refcnt--;
2084 /* Last packet in queue may be marked, shouldn't be dropped */
2085 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
2086 (b->bulk_flow_count *
2088 CAKE_FLAG_INGRESS))) ||
2092 /* drop this packet, get another one */
2093 if (q->rate_flags & CAKE_FLAG_INGRESS) {
2094 len = cake_advance_shaper(q, b, skb,
2096 flow->deficit -= len;
2097 b->tin_deficit -= len;
2101 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2102 qdisc_qstats_drop(sch);
2104 if (q->rate_flags & CAKE_FLAG_INGRESS)
2108 b->tin_ecn_mark += !!flow->cvars.ecn_marked;
2109 qdisc_bstats_update(sch, skb);
2111 /* collect delay stats */
2112 delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
2113 b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
2114 b->peak_delay = cake_ewma(b->peak_delay, delay,
2115 delay > b->peak_delay ? 2 : 8);
2116 b->base_delay = cake_ewma(b->base_delay, delay,
2117 delay < b->base_delay ? 2 : 8);
2119 len = cake_advance_shaper(q, b, skb, now, false);
2120 flow->deficit -= len;
2121 b->tin_deficit -= len;
2123 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
2124 u64 next = min(ktime_to_ns(q->time_next_packet),
2125 ktime_to_ns(q->failsafe_next_packet));
2127 qdisc_watchdog_schedule_ns(&q->watchdog, next);
2128 } else if (!sch->q.qlen) {
2131 for (i = 0; i < q->tin_cnt; i++) {
2132 if (q->tins[i].decaying_flow_count) {
2135 q->tins[i].cparams.target);
2137 qdisc_watchdog_schedule_ns(&q->watchdog,
2144 if (q->overflow_timeout)
2145 q->overflow_timeout--;
2150 static void cake_reset(struct Qdisc *sch)
2154 for (c = 0; c < CAKE_MAX_TINS; c++)
2155 cake_clear_tin(sch, c);
2158 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2159 [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
2160 [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
2161 [TCA_CAKE_ATM] = { .type = NLA_U32 },
2162 [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
2163 [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
2164 [TCA_CAKE_RTT] = { .type = NLA_U32 },
2165 [TCA_CAKE_TARGET] = { .type = NLA_U32 },
2166 [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
2167 [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
2168 [TCA_CAKE_NAT] = { .type = NLA_U32 },
2169 [TCA_CAKE_RAW] = { .type = NLA_U32 },
2170 [TCA_CAKE_WASH] = { .type = NLA_U32 },
2171 [TCA_CAKE_MPU] = { .type = NLA_U32 },
2172 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
2173 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
2176 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
2177 u64 target_ns, u64 rtt_est_ns)
2179 /* convert byte-rate into time-per-byte
2180 * so it will always unwedge in reasonable time.
2182 static const u64 MIN_RATE = 64;
2183 u32 byte_target = mtu;
2188 b->flow_quantum = 1514;
2190 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
2192 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
2193 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
2194 while (!!(rate_ns >> 34)) {
2198 } /* else unlimited, ie. zero delay */
2200 b->tin_rate_bps = rate;
2201 b->tin_rate_ns = rate_ns;
2202 b->tin_rate_shft = rate_shft;
2204 byte_target_ns = (byte_target * rate_ns) >> rate_shft;
2206 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
2207 b->cparams.interval = max(rtt_est_ns +
2208 b->cparams.target - target_ns,
2209 b->cparams.target * 2);
2210 b->cparams.mtu_time = byte_target_ns;
2211 b->cparams.p_inc = 1 << 24; /* 1/256 */
2212 b->cparams.p_dec = 1 << 20; /* 1/4096 */
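/* Worked example (illustrative): at 1 MB/s with a 1514-byte MTU, mtu_time is
 * ~1.5 ms, so the default 5 ms target stands; at 50 kB/s mtu_time grows to
 * ~30 ms, the target is raised to ~45 ms and the interval stretches to
 * ~140 ms (given the default 100 ms rtt), so the AQM target never falls
 * below about 1.5 packet serialisation times.
 */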
2215 static int cake_config_besteffort(struct Qdisc *sch)
2217 struct cake_sched_data *q = qdisc_priv(sch);
2218 struct cake_tin_data *b = &q->tins[0];
2219 u32 mtu = psched_mtu(qdisc_dev(sch));
2220 u64 rate = q->rate_bps;
2224 q->tin_index = besteffort;
2225 q->tin_order = normal_order;
2227 cake_set_rate(b, rate, mtu,
2228 us_to_ns(q->target), us_to_ns(q->interval));
2229 b->tin_quantum_band = 65535;
2230 b->tin_quantum_prio = 65535;
2235 static int cake_config_precedence(struct Qdisc *sch)
2237 /* convert high-level (user visible) parameters into internal format */
2238 struct cake_sched_data *q = qdisc_priv(sch);
2239 u32 mtu = psched_mtu(qdisc_dev(sch));
2240 u64 rate = q->rate_bps;
2246 q->tin_index = precedence;
2247 q->tin_order = normal_order;
2249 for (i = 0; i < q->tin_cnt; i++) {
2250 struct cake_tin_data *b = &q->tins[i];
2252 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2253 us_to_ns(q->interval));
2255 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2256 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2258 /* calculate next class's parameters */
2272 /* List of known Diffserv codepoints:
2274 * Least Effort (CS1)
2276 * Max Reliability & LLT "Lo" (TOS1)
2277 * Max Throughput (TOS2)
2280 * Assured Forwarding 1 (AF1x) - x3
2281 * Assured Forwarding 2 (AF2x) - x3
2282 * Assured Forwarding 3 (AF3x) - x3
2283 * Assured Forwarding 4 (AF4x) - x3
2284 * Precedence Class 2 (CS2)
2285 * Precedence Class 3 (CS3)
2286 * Precedence Class 4 (CS4)
2287 * Precedence Class 5 (CS5)
2288 * Precedence Class 6 (CS6)
2289 * Precedence Class 7 (CS7)
2291 * Expedited Forwarding (EF)
2293 * Total 25 codepoints.
2296 /* List of traffic classes in RFC 4594:
2297 * (roughly descending order of contended priority)
2298 * (roughly ascending order of uncontended throughput)
2300 * Network Control (CS6,CS7) - routing traffic
2301 * Telephony (EF,VA) - aka. VoIP streams
2302 * Signalling (CS5) - VoIP setup
2303 * Multimedia Conferencing (AF4x) - aka. video calls
2304 * Realtime Interactive (CS4) - eg. games
2305 * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
2306 * Broadcast Video (CS3)
2307 * Low Latency Data (AF2x,TOS4) - eg. database
2308 * Ops, Admin, Management (CS2,TOS1) - eg. ssh
2309 * Standard Service (CS0 & unrecognised codepoints)
2310 * High Throughput Data (AF1x,TOS2) - eg. web traffic
2311 * Low Priority Data (CS1) - eg. BitTorrent
2313 * Total 12 traffic classes.
2316 static int cake_config_diffserv8(struct Qdisc *sch)
2318 /* Pruned list of traffic classes for typical applications:
2320 * Network Control (CS6, CS7)
2321 * Minimum Latency (EF, VA, CS5, CS4)
2322 * Interactive Shell (CS2, TOS1)
2323 * Low Latency Transactions (AF2x, TOS4)
2324 * Video Streaming (AF4x, AF3x, CS3)
2325 * Bog Standard (CS0 etc.)
2326 * High Throughput (AF1x, TOS2)
2327 * Background Traffic (CS1)
2329 * Total 8 traffic classes.
2332 struct cake_sched_data *q = qdisc_priv(sch);
2333 u32 mtu = psched_mtu(qdisc_dev(sch));
2334 u64 rate = q->rate_bps;
2341 /* codepoint to class mapping */
2342 q->tin_index = diffserv8;
2343 q->tin_order = normal_order;
2345 /* class characteristics */
2346 for (i = 0; i < q->tin_cnt; i++) {
2347 struct cake_tin_data *b = &q->tins[i];
2349 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2350 us_to_ns(q->interval));
2352 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2353 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2355 /* calculate next class's parameters */
2369 static int cake_config_diffserv4(struct Qdisc *sch)
2371 /* Further pruned list of traffic classes for four-class system:
2373 * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
2374 * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
2375 * Best Effort (CS0, AF1x, TOS2, and those not specified)
2376 * Background Traffic (CS1)
2378 * Total 4 traffic classes.
2381 struct cake_sched_data *q = qdisc_priv(sch);
2382 u32 mtu = psched_mtu(qdisc_dev(sch));
2383 u64 rate = q->rate_bps;
2388 /* codepoint to class mapping */
2389 q->tin_index = diffserv4;
2390 q->tin_order = bulk_order;
2392 /* class characteristics */
2393 cake_set_rate(&q->tins[0], rate, mtu,
2394 us_to_ns(q->target), us_to_ns(q->interval));
2395 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2396 us_to_ns(q->target), us_to_ns(q->interval));
2397 cake_set_rate(&q->tins[2], rate >> 1, mtu,
2398 us_to_ns(q->target), us_to_ns(q->interval));
2399 cake_set_rate(&q->tins[3], rate >> 2, mtu,
2400 us_to_ns(q->target), us_to_ns(q->interval));
2402 /* priority weights */
2403 q->tins[0].tin_quantum_prio = quantum;
2404 q->tins[1].tin_quantum_prio = quantum >> 4;
2405 q->tins[2].tin_quantum_prio = quantum << 2;
2406 q->tins[3].tin_quantum_prio = quantum << 4;
2408 /* bandwidth-sharing weights */
2409 q->tins[0].tin_quantum_band = quantum;
2410 q->tins[1].tin_quantum_band = quantum >> 4;
2411 q->tins[2].tin_quantum_band = quantum >> 1;
2412 q->tins[3].tin_quantum_band = quantum >> 2;
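/* Example (illustrative): with a 100 Mbit/s base rate the tin thresholds set
 * above work out to 100 (best effort), 6.25 (bulk), 50 (video) and 25 (voice)
 * Mbit/s. Exceeding a threshold only lowers that tin's scheduling priority;
 * its traffic is not dropped or hard-capped.
 */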
2417 static int cake_config_diffserv3(struct Qdisc *sch)
2419 /* Simplified Diffserv structure with 3 tins.
2420 * Low Priority (CS1)
2421 * Best Effort (everything else)
2422 * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
2424 struct cake_sched_data *q = qdisc_priv(sch);
2425 u32 mtu = psched_mtu(qdisc_dev(sch));
2426 u64 rate = q->rate_bps;
2431 /* codepoint to class mapping */
2432 q->tin_index = diffserv3;
2433 q->tin_order = bulk_order;
2435 /* class characteristics */
2436 cake_set_rate(&q->tins[0], rate, mtu,
2437 us_to_ns(q->target), us_to_ns(q->interval));
2438 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2439 us_to_ns(q->target), us_to_ns(q->interval));
2440 cake_set_rate(&q->tins[2], rate >> 2, mtu,
2441 us_to_ns(q->target), us_to_ns(q->interval));
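/* As in the four-class case: tins[0] (Best Effort) may use the full shaped
 * rate, tins[1] (Low Priority) gets a 1/16 bandwidth threshold and tins[2]
 * (Latency Sensitive) a 1/4 threshold.
 */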
2443 /* priority weights */
2444 q->tins[0].tin_quantum_prio = quantum;
2445 q->tins[1].tin_quantum_prio = quantum >> 4;
2446 q->tins[2].tin_quantum_prio = quantum << 4;
2448 /* bandwidth-sharing weights */
2449 q->tins[0].tin_quantum_band = quantum;
2450 q->tins[1].tin_quantum_band = quantum >> 4;
2451 q->tins[2].tin_quantum_band = quantum >> 2;
2456 static void cake_reconfigure(struct Qdisc *sch)
2458 struct cake_sched_data *q = qdisc_priv(sch);
2461 switch (q->tin_mode) {
2462 case CAKE_DIFFSERV_BESTEFFORT:
2463 ft = cake_config_besteffort(sch);
2466 case CAKE_DIFFSERV_PRECEDENCE:
2467 ft = cake_config_precedence(sch);
2470 case CAKE_DIFFSERV_DIFFSERV8:
2471 ft = cake_config_diffserv8(sch);
2474 case CAKE_DIFFSERV_DIFFSERV4:
2475 ft = cake_config_diffserv4(sch);
2478 case CAKE_DIFFSERV_DIFFSERV3:
2480 ft = cake_config_diffserv3(sch);
2484 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
2485 cake_clear_tin(sch, c);
2486 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
2489 q->rate_ns = q->tins[ft].tin_rate_ns;
2490 q->rate_shft = q->tins[ft].tin_rate_shft;
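/* Pick the memory limit for queued packets: an explicit memory configuration
 * wins; otherwise, when a shaping rate is set, size the buffer to four times
 * the rate-interval product with a 4 MiB floor; with neither configured,
 * leave it effectively unlimited.
 */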
2492 if (q->buffer_config_limit) {
2493 q->buffer_limit = q->buffer_config_limit;
2494 } else if (q->rate_bps) {
2495 u64 t = q->rate_bps * q->interval;
2497 do_div(t, USEC_PER_SEC / 4);
2498 q->buffer_limit = max_t(u32, t, 4U << 20);
2500 q->buffer_limit = ~0;
2503 sch->flags &= ~TCQ_F_CAN_BYPASS;
2505 q->buffer_limit = min(q->buffer_limit,
2506 max(sch->limit * psched_mtu(qdisc_dev(sch)),
2507 q->buffer_config_limit));
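/* For illustration only (not derived from this file): the netlink attributes
 * handled by cake_change() below correspond to tc(8) options, e.g. something
 * like
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv4 \
 *       nat wash overhead 18 mpu 64 rtt 100ms memlimit 4mb split-gso
 *
 * where each keyword sets one TCA_CAKE_* attribute parsed below.
 */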
2510 static int cake_change(struct Qdisc *sch, struct nlattr *opt,
2511 struct netlink_ext_ack *extack)
2513 struct cake_sched_data *q = qdisc_priv(sch);
2514 struct nlattr *tb[TCA_CAKE_MAX + 1];
2520 err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
2524 if (tb[TCA_CAKE_NAT]) {
2525 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
2526 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
2527 q->flow_mode |= CAKE_FLOW_NAT_FLAG *
2528 !!nla_get_u32(tb[TCA_CAKE_NAT]);
2530 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
2531 "No conntrack support in kernel");
2536 if (tb[TCA_CAKE_BASE_RATE64])
2537 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
2539 if (tb[TCA_CAKE_DIFFSERV_MODE])
2540 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
2542 if (tb[TCA_CAKE_WASH]) {
2543 if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
2544 q->rate_flags |= CAKE_FLAG_WASH;
2546 q->rate_flags &= ~CAKE_FLAG_WASH;
2549 if (tb[TCA_CAKE_FLOW_MODE])
2550 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
2551 (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
2554 if (tb[TCA_CAKE_ATM])
2555 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
2557 if (tb[TCA_CAKE_OVERHEAD]) {
2558 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
2559 q->rate_flags |= CAKE_FLAG_OVERHEAD;
2567 if (tb[TCA_CAKE_RAW]) {
2568 q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
2576 if (tb[TCA_CAKE_MPU])
2577 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
2579 if (tb[TCA_CAKE_RTT]) {
2580 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
2586 if (tb[TCA_CAKE_TARGET]) {
2587 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
2593 if (tb[TCA_CAKE_AUTORATE]) {
2594 if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
2595 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
2597 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
2600 if (tb[TCA_CAKE_INGRESS]) {
2601 if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
2602 q->rate_flags |= CAKE_FLAG_INGRESS;
2604 q->rate_flags &= ~CAKE_FLAG_INGRESS;
2607 if (tb[TCA_CAKE_ACK_FILTER])
2608 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
2610 if (tb[TCA_CAKE_MEMORY])
2611 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
2613 if (tb[TCA_CAKE_SPLIT_GSO]) {
2614 if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
2615 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2617 q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
2622 cake_reconfigure(sch);
2623 sch_tree_unlock(sch);
2629 static void cake_destroy(struct Qdisc *sch)
2631 struct cake_sched_data *q = qdisc_priv(sch);
2633 qdisc_watchdog_cancel(&q->watchdog);
2634 tcf_block_put(q->block);
2638 static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2639 struct netlink_ext_ack *extack)
2641 struct cake_sched_data *q = qdisc_priv(sch);
2645 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
2646 q->flow_mode = CAKE_FLOW_TRIPLE;
2648 q->rate_bps = 0; /* unlimited by default */
2650 q->interval = 100000; /* 100ms default */
2651 q->target = 5000; /* 5ms: codel RFC argues
2652 * for 5 to 10% of interval */
2654 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2658 qdisc_watchdog_init(&q->watchdog, sch);
2661 err = cake_change(sch, opt, extack);
2667 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2671 quantum_div[0] = ~0;
2672 for (i = 1; i <= CAKE_QUEUES; i++)
2673 quantum_div[i] = 65535 / i;
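/* quantum_div[i] caches 65535 / i as a 16-bit reciprocal so that, at dequeue
 * time, a flow's quantum can be scaled down by the number of active flows
 * sharing its host (for the dual host-fairness modes) with a multiply and
 * shift instead of a division; index 0 is just a guard value.
 */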
2675 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
2680 for (i = 0; i < CAKE_MAX_TINS; i++) {
2681 struct cake_tin_data *b = q->tins + i;
2683 INIT_LIST_HEAD(&b->new_flows);
2684 INIT_LIST_HEAD(&b->old_flows);
2685 INIT_LIST_HEAD(&b->decaying_flows);
2686 b->sparse_flow_count = 0;
2687 b->bulk_flow_count = 0;
2688 b->decaying_flow_count = 0;
2690 for (j = 0; j < CAKE_QUEUES; j++) {
2691 struct cake_flow *flow = b->flows + j;
2692 u32 k = j * CAKE_MAX_TINS + i;
2694 INIT_LIST_HEAD(&flow->flowchain);
2695 cobalt_vars_init(&flow->cvars);
2697 q->overflow_heap[k].t = i;
2698 q->overflow_heap[k].b = j;
2699 b->overflow_idx[j] = k;
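/* The overflow heap is shared by all tins: entry k records its (tin, flow)
 * coordinates and overflow_idx[] stores the reverse mapping, so the heap and
 * the per-tin flow tables can be kept in sync as heap entries are moved
 * around later on.
 */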
2703 cake_reconfigure(sch);
2704 q->avg_peak_bandwidth = q->rate_bps;
2710 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2712 struct cake_sched_data *q = qdisc_priv(sch);
2713 struct nlattr *opts;
2715 opts = nla_nest_start(skb, TCA_OPTIONS);
2717 goto nla_put_failure;
2719 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
2721 goto nla_put_failure;
2723 if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
2724 q->flow_mode & CAKE_FLOW_MASK))
2725 goto nla_put_failure;
2727 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
2728 goto nla_put_failure;
2730 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
2731 goto nla_put_failure;
2733 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
2734 goto nla_put_failure;
2736 if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
2737 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
2738 goto nla_put_failure;
2740 if (nla_put_u32(skb, TCA_CAKE_INGRESS,
2741 !!(q->rate_flags & CAKE_FLAG_INGRESS)))
2742 goto nla_put_failure;
2744 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
2745 goto nla_put_failure;
2747 if (nla_put_u32(skb, TCA_CAKE_NAT,
2748 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
2749 goto nla_put_failure;
2751 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
2752 goto nla_put_failure;
2754 if (nla_put_u32(skb, TCA_CAKE_WASH,
2755 !!(q->rate_flags & CAKE_FLAG_WASH)))
2756 goto nla_put_failure;
2758 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
2759 goto nla_put_failure;
2761 if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
2762 if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
2763 goto nla_put_failure;
2765 if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
2766 goto nla_put_failure;
2768 if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
2769 goto nla_put_failure;
2771 if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
2772 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
2773 goto nla_put_failure;
2775 return nla_nest_end(skb, opts);
2781 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
2783 struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
2784 struct cake_sched_data *q = qdisc_priv(sch);
2785 struct nlattr *tstats, *ts;
2791 #define PUT_STAT_U32(attr, data) do { \
2792 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2793 goto nla_put_failure; \
2795 #define PUT_STAT_U64(attr, data) do { \
2796 if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
2797 data, TCA_CAKE_STATS_PAD)) \
2798 goto nla_put_failure; \
2801 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
2802 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
2803 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
2804 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
2805 PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
2806 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
2807 PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
2808 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
2813 tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
2815 goto nla_put_failure;
2817 #define PUT_TSTAT_U32(attr, data) do { \
2818 if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
2819 goto nla_put_failure; \
2821 #define PUT_TSTAT_U64(attr, data) do { \
2822 if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
2823 data, TCA_CAKE_TIN_STATS_PAD)) \
2824 goto nla_put_failure; \
2827 for (i = 0; i < q->tin_cnt; i++) {
2828 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
2830 ts = nla_nest_start(d->skb, i + 1);
2832 goto nla_put_failure;
2834 PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
2835 PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
2836 PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
2838 PUT_TSTAT_U32(TARGET_US,
2839 ktime_to_us(ns_to_ktime(b->cparams.target)));
2840 PUT_TSTAT_U32(INTERVAL_US,
2841 ktime_to_us(ns_to_ktime(b->cparams.interval)));
2843 PUT_TSTAT_U32(SENT_PACKETS, b->packets);
2844 PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
2845 PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
2846 PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
2848 PUT_TSTAT_U32(PEAK_DELAY_US,
2849 ktime_to_us(ns_to_ktime(b->peak_delay)));
2850 PUT_TSTAT_U32(AVG_DELAY_US,
2851 ktime_to_us(ns_to_ktime(b->avge_delay)));
2852 PUT_TSTAT_U32(BASE_DELAY_US,
2853 ktime_to_us(ns_to_ktime(b->base_delay)));
2855 PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
2856 PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
2857 PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
2859 PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
2860 b->decaying_flow_count);
2861 PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
2862 PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
2863 PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
2865 PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
2866 nla_nest_end(d->skb, ts);
2869 #undef PUT_TSTAT_U32
2870 #undef PUT_TSTAT_U64
2872 nla_nest_end(d->skb, tstats);
2873 return nla_nest_end(d->skb, stats);
2876 nla_nest_cancel(d->skb, stats);
2880 static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
2885 static unsigned long cake_find(struct Qdisc *sch, u32 classid)
2890 static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
2896 static void cake_unbind(struct Qdisc *q, unsigned long cl)
2900 static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
2901 struct netlink_ext_ack *extack)
2903 struct cake_sched_data *q = qdisc_priv(sch);
2910 static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
2911 struct sk_buff *skb, struct tcmsg *tcm)
2913 tcm->tcm_handle |= TC_H_MIN(cl);
2917 static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
2918 struct gnet_dump *d)
2920 struct cake_sched_data *q = qdisc_priv(sch);
2921 const struct cake_flow *flow = NULL;
2922 struct gnet_stats_queue qs = { 0 };
2923 struct nlattr *stats;
2926 if (idx < CAKE_QUEUES * q->tin_cnt) {
2927 const struct cake_tin_data *b =
2928 &q->tins[q->tin_order[idx / CAKE_QUEUES]];
2929 const struct sk_buff *skb;
2931 flow = &b->flows[idx % CAKE_QUEUES];
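/* Class handles are 1-based (cake_walk() below emits i * CAKE_QUEUES + j + 1),
 * so idx is the handle minus one: idx / CAKE_QUEUES selects the tin in
 * display order and idx % CAKE_QUEUES the flow queue within it.
 */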
2940 sch_tree_unlock(sch);
2942 qs.backlog = b->backlogs[idx % CAKE_QUEUES];
2943 qs.drops = flow->dropped;
2945 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
2948 ktime_t now = ktime_get();
2950 stats = nla_nest_start(d->skb, TCA_STATS_APP);
2954 #define PUT_STAT_U32(attr, data) do { \
2955 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2956 goto nla_put_failure; \
2958 #define PUT_STAT_S32(attr, data) do { \
2959 if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2960 goto nla_put_failure; \
2963 PUT_STAT_S32(DEFICIT, flow->deficit);
2964 PUT_STAT_U32(DROPPING, flow->cvars.dropping);
2965 PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
2966 PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
2967 if (flow->cvars.p_drop) {
2968 PUT_STAT_S32(BLUE_TIMER_US,
2971 flow->cvars.blue_timer)));
2973 if (flow->cvars.dropping) {
2974 PUT_STAT_S32(DROP_NEXT_US,
2977 flow->cvars.drop_next)));
2980 if (nla_nest_end(d->skb, stats) < 0)
2987 nla_nest_cancel(d->skb, stats);
2991 static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2993 struct cake_sched_data *q = qdisc_priv(sch);
2999 for (i = 0; i < q->tin_cnt; i++) {
3000 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
3002 for (j = 0; j < CAKE_QUEUES; j++) {
3003 if (list_empty(&b->flows[j].flowchain) ||
3004 arg->count < arg->skip) {
3008 if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
3017 static const struct Qdisc_class_ops cake_class_ops = {
3020 .tcf_block = cake_tcf_block,
3021 .bind_tcf = cake_bind,
3022 .unbind_tcf = cake_unbind,
3023 .dump = cake_dump_class,
3024 .dump_stats = cake_dump_class_stats,
3028 static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
3029 .cl_ops = &cake_class_ops,
3031 .priv_size = sizeof(struct cake_sched_data),
3032 .enqueue = cake_enqueue,
3033 .dequeue = cake_dequeue,
3034 .peek = qdisc_peek_dequeued,
3036 .reset = cake_reset,
3037 .destroy = cake_destroy,
3038 .change = cake_change,
3040 .dump_stats = cake_dump_stats,
3041 .owner = THIS_MODULE,
3044 static int __init cake_module_init(void)
3046 return register_qdisc(&cake_qdisc_ops);
3049 static void __exit cake_module_exit(void)
3051 unregister_qdisc(&cake_qdisc_ops);
3054 module_init(cake_module_init)
3055 module_exit(cake_module_exit)
3056 MODULE_AUTHOR("Jonathan Morton");
3057 MODULE_LICENSE("Dual BSD/GPL");
3058 MODULE_DESCRIPTION("The CAKE shaper.");