GNU Linux-libre 5.4.207-gnu1
[releases.git] / net/sched/sch_red.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
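
/* Usage sketch (not part of this file): with the iproute2 "tc" front end, a
 * configuration that respects limit > qth_max + burst might look like the
 * line below; the byte values are illustrative only.
 *
 *   tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *          avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn
 */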

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN / TC_RED_HARDDROP / TC_RED_ADAPTATIVE */
	struct timer_list	adapt_timer;	/* periodic re-tuning for adaptive RED */
	struct Qdisc		*sch;		/* back-pointer for the timer callback */
	struct red_parms	parms;		/* configured RED parameters */
	struct red_vars		vars;		/* run-time state (qavg etc.) */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* child queue, a bfifo sized to limit by default */
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

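/* Enqueue path: update the EWMA average queue length from the child's
 * backlog, end any idle period, then let the RED algorithm decide whether to
 * pass the packet through, ECN-mark it, or drop it before handing it to the
 * child fifo.
 */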
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

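/* Dequeue from the child fifo; when the child runs empty, note the start of
 * an idle period so that qavg can decay correctly while the link is idle.
 */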
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

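/* Install or remove the RED configuration in hardware via ndo_setup_tc
 * (TC_SETUP_QDISC_RED).  The thresholds are converted back from their
 * scaled fixed-point form (>> Wlog) before being handed to the driver.
 */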
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}
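
/* For orientation only (not part of this file): a driver that claims RED
 * offload typically switches on opt->command in its ndo_setup_tc callback.
 * A minimal, hypothetical sketch (foo_* names are placeholders):
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		struct tc_red_qopt_offload *opt = type_data;
 *
 *		if (type != TC_SETUP_QDISC_RED)
 *			return -EOPNOTSUPP;
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:	return foo_red_replace(dev, opt);
 *		case TC_RED_DESTROY:	return foo_red_destroy(dev, opt);
 *		case TC_RED_STATS:	return foo_red_stats(dev, opt);
 *		case TC_RED_XSTATS:	return foo_red_xstats(dev, opt);
 *		default:		return -EOPNOTSUPP;
 *		}
 *	}
 */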

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

/* TCA_RED_STAB must carry the full RED_STAB_SIZE (256 byte) lookup table
 * used to age qavg across idle periods.
 */
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P]	= { .type = NLA_U32 },
};

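/* Parse and apply a new configuration: validate the netlink attributes,
 * optionally build a fresh bfifo child sized to the new limit, then swap
 * parameters, child and adaptive-RED timer state under the qdisc tree lock
 * before pushing the result to hardware.
 */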
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	int err;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;
}

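/* Adaptive RED: every 500 ms, re-tune max_P from the observed average queue
 * length while holding the root qdisc lock.
 */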
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

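/* Export RED-specific statistics.  When the qdisc is offloaded, ask the
 * driver to refresh q->stats first, then report the aggregate early-drop,
 * tail-drop and mark counters as struct tc_red_xstats.
 */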
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

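/* RED exposes exactly one pseudo-class (minor 1) so that the inner queue can
 * be inspected, walked and replaced through the usual classful qdisc
 * interfaces.
 */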
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");