/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic:
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use the rxhash as a fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (eg TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue() :
 * - lookup one RB tree (out of 1024 or more) to find the flow.
 *   If the flow does not exist, create it and add it to the tree.
 *   Add the skb to the per-flow list of skbs (fifo).
 * - Use a special fifo for high prio packets
 *
 * dequeue() : serves flows in Round Robin
 * Note : When a flow becomes empty, we do not immediately remove it from
 * rb trees, for performance reasons (it is expected to send additional packets,
 * or the SLAB cache will reuse the socket for another flow)
 */
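/* Example configuration (illustrative values only, not part of the original
 * file): this qdisc is typically installed from userspace with tc, and the
 * parameter names below correspond to the TCA_FQ_* netlink attributes handled
 * in fq_change():
 *
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *		quantum 3028 initial_quantum 15140 maxrate 10gbit buckets 1024
 */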
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/tcp_states.h>
/*
 * Per flow structure, dynamically allocated
 */
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	struct sk_buff	*tail;		/* last skb in the list */
	unsigned long	age;		/* jiffies when flow was emptied, for gc */
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	int		qlen;		/* number of packets in flow queue */
	u32		socket_hash;	/* sk_hash */
	struct fq_flow	*next;		/* next pointer in RR lists, or &detached */
	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	struct fq_flow	*first;
struct fq_sched_data {
	struct fq_flow_head new_flows;
	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;

	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)

static bool fq_flow_is_detached(const struct fq_flow *f)
	return f->next == &detached;

static bool fq_flow_is_throttled(const struct fq_flow *f)
	return f->next == &throttled;
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
		head->last->next = flow;

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
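/* Flows waiting for their pacing release time sit in q->delayed, an RB tree
 * ordered by f->time_next_packet; fq_check_throttled() later unthrottles the
 * expired ones by walking from rb_first(&q->delayed).
 */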
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

		aux = container_of(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
			p = &parent->rb_left;
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
static struct kmem_cache *fq_flow_cachep __read_mostly;

/* limit number of collected flows per round */
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
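/* Flows that have been detached for more than FQ_GC_AGE (3 * HZ jiffies, i.e.
 * three seconds) are garbage collection candidates; fq_gc() below reclaims a
 * bounded number of them while walking one bucket's RB tree during lookup.
 */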
static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;

		f = container_of(parent, struct fq_flow, fq_node);

		if (fq_gc_candidate(f)) {
			if (fcnt == FQ_GC_MAX)

			p = &parent->rb_right;
			p = &parent->rb_left;

	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);

	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		f = container_of(parent, struct fq_flow, fq_node);

		/* socket might have been reallocated, so check
		 * if its sk_hash is the same.
		 * If not, we need to refill credit with a new one.
		 */
		if (unlikely(skb->sk &&
			     f->socket_hash != sk->sk_hash)) {
			f->credit = q->initial_quantum;
			f->socket_hash = sk->sk_hash;
			if (fq_flow_is_throttled(f))
				fq_flow_unset_throttled(q, f);
			f->time_next_packet = 0ULL;

			p = &parent->rb_right;
			p = &parent->rb_left;

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
		q->stat_allocation_errors++;

	fq_flow_set_detached(f);
	f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);
/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
	struct sk_buff *skb = flow->head;

		flow->head = skb->next;
		qdisc_qstats_backlog_dec(sch, skb);

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
	struct sk_buff *prev, *head = flow->head;

	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	while (skb_is_retransmit(head)) {

	if (!prev) { /* no rtx packet in queue, become the new head */
		skb->next = flow->head;

		if (prev == flow->tail)

			skb->next = prev->next;
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
	struct fq_sched_data *q = qdisc_priv(sch);

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);

	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
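		/* The flow was detached: if it stayed idle longer than
		 * flow_refill_delay (40 ms by default, see fq_init()), top its
		 * credit back up to a full quantum before it runs again.
		 */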
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;

	return NET_XMIT_SUCCESS;
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
	unsigned long sample;

	if (q->time_next_delayed_flow > now)

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;
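	/* The two lines above implement an EWMA with a 1/8 weight for the new
	 * sample: unthrottle_latency_ns = 7/8 * unthrottle_latency_ns + sample / 8.
	 */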
	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
		fq_flow_unset_throttled(q, f);
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;

	skb = fq_dequeue_head(sch, &q->internal);
	fq_check_throttled(q, now);
	head = &q->new_flows;
		head = &q->old_flows;
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);

	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);

	skb = fq_dequeue_head(sch, f);
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
			fq_flow_set_detached(f);
	f->credit -= qdisc_pkt_len(skb);

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))

	rate = q->flow_max_rate;
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		plen = qdisc_pkt_len(skb);
		plen = max(qdisc_pkt_len(skb), q->quantum);

		u64 len = (u64)plen * NSEC_PER_SEC;

		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			q->stat_pkts_too_long++;

		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
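		/* Illustrative arithmetic (numbers not from the original source):
		 * for a 1514 byte packet paced at sk_pacing_rate = 125000000
		 * bytes/sec (1 Gbit/s), len is about 1514 * NSEC_PER_SEC / rate
		 * = 12112 ns, i.e. roughly 12 us between packets of this flow.
		 */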
	qdisc_bstats_update(sch, skb);

static void fq_flow_purge(struct fq_flow *flow)
	rtnl_kfree_skbs(flow->head, flow->tail);

static void fq_reset(struct Qdisc *sch)
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;

	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = container_of(p, struct fq_flow, fq_node);
			kmem_cache_free(fq_flow_cachep, f);

	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			of = container_of(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				kmem_cache_free(fq_flow_cachep, of);

			nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

			np = &nroot->rb_node;
				nf = container_of(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);
					np = &parent->rb_right;
					np = &parent->rb_left;
			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);

	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
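/* Allocate the array of RB tree roots with kmalloc_node() when possible, and
 * fall back to vmalloc_node() if a large physically contiguous allocation is
 * not available.
 */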
static void *fq_alloc_node(size_t sz, int node)
	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
		ptr = vmalloc_node(sz, node);

static void fq_free(void *addr)

static int fq_resize(struct Qdisc *sch, u32 log)
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;

	if (q->fq_root && log == q->fq_trees_log)

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = fq_alloc_node(sizeof(struct rb_root) << log,
			      netdev_queue_numa_node_read(sch->dev_queue));

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	old_fq_root = q->fq_root;
	fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_trees_log = log;
	sch_tree_unlock(sch);

	fq_free(old_fq_root);
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))

	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20))
			q->quantum = quantum;

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

			q->rate_enable = enable;

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	sch_tree_unlock(sch);
	err = fq_resize(sch, fq_log);

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
static void fq_destroy(struct Qdisc *sch)
	struct fq_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
	struct fq_sched_data *q = qdisc_priv(sch);

	q->flow_plimit = 100;
	q->quantum = 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay = msecs_to_jiffies(40);
	q->flow_max_rate = ~0U;
	q->time_next_delayed_flow = ~0ULL;

	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->fq_trees_log = ilog2(1024);
	q->orphan_mask = 1024 - 1;
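	/* Default "low rate" threshold: 550 Kbit/s, stored in bytes per second. */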
	q->low_rate_threshold = 550000 / 8;
	qdisc_watchdog_init(&q->watchdog, sch);

		err = fq_change(sch, opt);
		err = fq_resize(sch, q->fq_trees_log);
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
	struct fq_sched_data *q = qdisc_priv(sch);

	opts = nla_nest_start(skb, TCA_OPTIONS);
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	st.gc_flows = q->stat_gc_flows;
	st.highprio_packets = q->stat_internal_packets;
	st.tcp_retrans = q->stat_tcp_retrans;
	st.throttled = q->stat_throttled;
	st.flows_plimit = q->stat_flows_plimit;
	st.pkts_too_long = q->stat_pkts_too_long;
	st.allocation_errors = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.inactive_flows = q->inactive_flows;
	st.throttled_flows = q->throttled_flows;
	st.unthrottle_latency_ns = min_t(unsigned long,
					 q->unthrottle_latency_ns, ~0U);
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.priv_size	= sizeof(struct fq_sched_data),

	.enqueue	= fq_enqueue,
	.dequeue	= fq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.destroy	= fq_destroy,
	.dump_stats	= fq_dump_stats,
	.owner		= THIS_MODULE,
static int __init fq_module_init(void)
	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
	ret = register_qdisc(&fq_qdisc_ops);
		kmem_cache_destroy(fq_flow_cachep);

static void __exit fq_module_exit(void)
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");