/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use the rxhash as a fallback (32 bit wide hash).
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (eg TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per flow list of skbs (FIFO).
 *   - Use a special FIFO for high prio packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: when a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it is expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */
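
/*
 * Illustrative configuration (not part of this file): the qdisc is normally
 * attached and tuned with the tc(8) tool from iproute2, e.g.
 *
 *   tc qdisc replace dev eth0 root fq
 *   tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 maxrate 1gbit
 *
 * Parameter names follow tc-fq(8); availability of individual options
 * depends on the iproute2 version in use.
 */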

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

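/* Insert a flow into the 'delayed' rbtree, ordered by the time its next
 * packet is allowed to be sent (f->time_next_packet), and advance the
 * qdisc wide wakeup time if this flow needs service sooner.
 */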
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

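/* Garbage collect at most FQ_GC_MAX flows on the search path towards @sk
 * that have been detached for more than FQ_GC_AGE jiffies. Called from
 * fq_classify() when the number of inactive flows gets too high.
 */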
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

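/* Find (or create) the flow an skb belongs to.
 * TC_PRIO_CONTROL packets and allocation failures are mapped to the
 * internal flow, which bypasses the round robin and pacing logic.
 */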
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing the low order bit to 1, we make sure not to
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill the credit with the
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}

/* add skb to flow queue
 * The flow queue is a linked list, kind of a FIFO, except for TCP retransmits.
 * We special case TCP retransmits so they are transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) { /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}

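/* Qdisc enqueue: classify the skb, enforce the per flow and per qdisc
 * limits, and (re)attach the flow to the 'new' RR list if it was detached.
 */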
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                     SK_PACING_FQ))
                                smp_store_release(&sk->sk_pacing_status,
                                                  SK_PACING_FQ);
                }
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

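/* Move flows whose pacing deadline has passed from the 'delayed' rbtree
 * back to the 'old' RR list, and update the unthrottle latency estimate.
 */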
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnose timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

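/* Qdisc dequeue: serve the internal flow first, then new/old flows in
 * Round Robin, each flow getting a quantum of credit per round.
 * When pacing is enabled, compute when the next packet of the flow may be
 * sent; a flow whose time has not come yet is parked in the 'delayed' tree.
 */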
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate, plen;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = f->head;
        if (unlikely(skb && now < f->time_next_packet &&
                     !skb_is_tcp_pure_ack(skb))) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->credit -= qdisc_pkt_len(skb);

        if (!q->rate_enable)
                goto out;

        /* Do not pace locally generated ack packets */
        if (skb_is_tcp_pure_ack(skb))
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk)
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate <= q->low_rate_threshold) {
                f->credit = 0;
                plen = qdisc_pkt_len(skb);
        } else {
                plen = max(qdisc_pkt_len(skb), q->quantum);
                if (f->credit > 0)
                        goto out;
        }
        if (rate != ~0U) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
                /* Since the socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for scheduling/timer drift.
                 * f->time_next_packet was set when the prior packet was sent,
                 * and the current time (@now) can be too late by tens of usec.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

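/* Move all flows from the old hash array to the new one, freeing flows
 * that are garbage collection candidates instead of re-inserting them.
 */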
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

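/* Allocate a new array of 2^log RB tree roots and rehash existing flows
 * into it. A no-op if the hash table already has the requested size.
 */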
static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was set up, we can allocate memory on the right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
};

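/* Change qdisc parameters from a netlink message, resize the hash table
 * if needed, and drop packets exceeding the (possibly lowered) limit.
 */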
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0 && quantum <= (1 << 20)) {
                        q->quantum = quantum;
                } else {
                        NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
                        err = -EINVAL;
                }
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

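/* Set default parameters (no rate cap, pacing enabled, 1024 hash buckets)
 * and apply the optional netlink configuration.
 */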
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0U;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.tcp_retrans            = q->stat_tcp_retrans;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");