/*
 * Fair Queue CoDel discipline
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* upper bound on the configurable quantum, checked in fq_codel_change() */
#define FQ_CODEL_QUANTUM_MAX (1 << 20)

/*      Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier, or an external one)
 * into flows. This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
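
/* Illustrative user-space configuration (a sketch; the keywords follow the
 * iproute2 fq_codel options that map onto the TCA_FQ_CODEL_* attributes
 * parsed in fq_codel_change() below, and the values shown are this module's
 * defaults, with quantum assuming a 1500 byte MTU Ethernet device):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		quantum 1514 drop_batch 64 memory_limit 32Mb ecn
 */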

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        u32               dropped; /* number of drops (or ECN marks) on this flow */
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

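/* Map the skb flow hash onto a slot in [0, flows_cnt). reciprocal_scale()
 * computes (u32)(((u64)hash * flows_cnt) >> 32), a multiply-and-shift that
 * avoids a modulus; e.g. hash 0x80000000 with the default 1024 flows gives
 * slot (0x80000000ULL * 1024) >> 32 = 512.
 */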
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

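/* Classify an skb into a flow, returning a 1-based flow index so that 0
 * can signal "drop": either the minor of skb->priority when it addresses
 * this qdisc directly, the classid chosen by an attached filter, or the
 * flow hash + 1 when no filter is attached.
 */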
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb->next = NULL;
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in the fast path (packet enqueue/dequeue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        flow->dropped += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}

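/* Enqueue entry point. Returns NET_XMIT_SUCCESS, NET_XMIT_CN when the
 * qdisc was over its packet or memory limit and the batch drop hit the
 * flow this packet joined, or the classifier verdict (NET_XMIT_SUCCESS
 * combined with __NET_XMIT_STOLEN or __NET_XMIT_BYPASS) when idx == 0.
 */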
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog,
         * with a limit of 64 packets to avoid adding too big a CPU spike.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
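        /* The skb we just queued was never accounted for by our parents
         * (they see NET_XMIT_CN for it), so it is excluded (the "- 1" and
         * "- pkt_len") from the backlog reduction propagated up the tree.
         */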
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

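/* Deficit Round Robin over the two flow lists: new flows are served before
 * old ones, and each flow may dequeue until its byte deficit is exhausted
 * before being rotated to the tail of old_flows with the deficit topped up
 * by one quantum. For example, with quantum 1514 a fresh flow starts at
 * deficit 1514; after a 1000 byte packet it is 514, after another -486, so
 * the flow is moved to old_flows and restarts at 1028 on its next visit.
 */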
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;
        u32 prev_drop_count, prev_ecn_mark;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        prev_drop_count = q->cstats.drop_count;
        prev_ecn_mark = q->cstats.ecn_mark;

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

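/* TARGET, INTERVAL and CE_THRESHOLD arrive in microseconds and are
 * converted to codel_time_t units of 2^CODEL_SHIFT = 1024 ns via
 * (usec * NSEC_PER_USEC) >> CODEL_SHIFT; e.g. a 5000 us target becomes
 * (5000 * 1000) >> 10 = 4882 codel time units.
 */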
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        u32 quantum = 0;
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
                               NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        if (tb[TCA_FQ_CODEL_QUANTUM]) {
                quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
                if (quantum > FQ_CODEL_QUANTUM_MAX) {
                        NL_SET_ERR_MSG(extack, "Invalid quantum");
                        return -EINVAL;
                }
        }
        sch_tree_lock(sch);

        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (quantum)
                q->quantum = quantum;

        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
                        goto init_failure;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                goto init_failure;

        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
                if (!q->flows) {
                        err = -ENOMEM;
                        goto init_failure;
                }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                if (!q->backlogs) {
                        err = -ENOMEM;
                        goto alloc_failure;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;

alloc_failure:
        kvfree(q->flows);
        q->flows = NULL;
init_failure:
        q->flows_cnt = 0;
        return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
            nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                        codel_time_to_us(q->cparams.ce_threshold)))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type                           = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

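/* Class interface: fq_codel exposes each flow as a read-only pseudo-class
 * (classid minors 1..flows_cnt) so that per-flow statistics can be dumped
 * and filters can bind classids, but flows have no attachable leaf qdisc
 * (fq_codel_leaf() returns NULL) and cannot be created or deleted.
 */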
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                                   u32 classid)
{
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
                                            struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                               struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain) ||
                    arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, i + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           =       fq_codel_leaf,
        .find           =       fq_codel_find,
        .tcf_block      =       fq_codel_tcf_block,
        .bind_tcf       =       fq_codel_bind,
        .unbind_tcf     =       fq_codel_unbind,
        .dump           =       fq_codel_dump_class,
        .dump_stats     =       fq_codel_dump_class_stats,
        .walk           =       fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
        .change         =       fq_codel_change,
        .dump           =       fq_codel_dump,
        .dump_stats     =       fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");