// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   Proc. IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */

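/* Illustrative configuration only (not part of this file; the device name
 * and parameter values below are placeholders). Attaching CHOKe as a root
 * qdisc with iproute2 might look like:
 *
 *	tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit ecn
 */
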
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

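/* Note: choke_change() below sizes the table to roundup_pow_of_two(limit + 1)
 * slots, so with limit = CHOKE_MAX_QUEUE the table tops out at exactly
 * 128K pointer slots.
 */
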
struct choke_sched_data {
	/* Parameters */
	u32		 limit;
	unsigned char	 flags;
	struct red_parms parms;

	/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;
	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

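/* q->tab is used as a power-of-two ring buffer of skb pointers: indices
 * wrap via "& q->tab_mask", and deleting a packet from the middle leaves
 * a NULL "hole" that the zap helpers below skip over.
 */
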
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

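/* Worked example: with tab_mask = 7, head = 6 and tail = 2 the result is
 * (2 - 6) & 7 = 4, i.e. four slots are in use, holes included.
 */
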
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

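/* The flow digest is computed lazily and cached in the skb control block,
 * so each packet is dissected at most once even when it is compared
 * against several candidates.
 */
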
/*
 * Select a packet at random from queue
 * HACK: since the queue can have holes from previous deletions, retry
 *	 several times to find a random skb, then give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

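/* Returning NET_XMIT_CN (rather than NET_XMIT_DROP) reports a
 * congestion-related drop to the caller, so the local stack can treat it
 * as a congestion signal rather than an ordinary enqueue failure.
 */
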
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	stab = nla_data(tb[TCA_CHOKE_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
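	/* e.g. a limit of 1000 packets yields roundup_pow_of_two(1001) = 1024
	 * table slots, i.e. mask = 1023.
	 */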
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

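/* These extended stats are what userspace tools such as "tc -s qdisc show"
 * report for choke (early, marked, pdrop, other, matched).
 */
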
static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");