/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>
/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.

	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.

	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive traffic
	into the higher priority band.

	We still need true WFQ for the top level CSZ, but using WFQ
	for best-effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits:
	- maximal queue length per flow to 127 packets.
	- number of hash buckets to 65536.

	It is easy to increase these values, but not in flight.  */
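/* For illustration, SFQ is typically configured from user space with
 * iproute2, either as the root qdisc or as a leaf under a classful
 * scheduler (e.g. PRIO/CBQ) as suggested above; device and handle values
 * below are only examples:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *	tc qdisc add dev eth0 parent 1:3 handle 30: sfq perturb 10 quantum 1514
 *
 * "perturb" re-keys the flow hash every N seconds (see sfq_perturbation()
 * below), and "quantum" is the per-round byte allotment, which must be
 * >= the MTU.
 */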
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
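/* Worked example of the scaling above: with a typical Ethernet quantum of
 * 1514 bytes, SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190 scaled
 * units, and even a 64K packet yields DIV_ROUND_UP(65535, 8) = 8192, which
 * fits easily in the signed 16-bit 'allot' field (SHRT_MAX == 32767).
 */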
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};
struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	siphash_key_t	perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};
/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
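/* Concrete example of the index scheme: with SFQ_MAX_FLOWS == 65408,
 * val == 5 refers to slots[5].dep (a per-flow anchor), while
 * val == SFQ_MAX_FLOWS + 3 refers to dep[3], the head of the list of
 * flows currently holding exactly 3 packets.
 */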
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}
#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
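/* Note on the helpers above: the slot itself is used as the circular list
 * head by casting it to (struct sk_buff *); this relies on skblist_next and
 * skblist_prev being the first members of struct sfq_slot, at the same
 * offsets as sk_buff's own next/prev pointers.  An empty slot therefore
 * satisfies:
 *
 *	slot->skblist_next == slot->skblist_prev == (struct sk_buff *)slot
 */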
static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}
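/* Summary of the flag combinations tested above:
 * - TC_RED_ECN only:                ECN-mark in both the probabilistic and
 *                                   the hard (above qth_max) regions.
 * - TC_RED_ECN | TC_RED_HARDDROP:   ECN-mark probabilistically, but drop
 *                                   packets above the max threshold.
 * - neither flag:                   classic RED, drop in both regions.
 */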
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	siphash_key_t nkey;

	get_random_bytes(&nkey, sizeof(nkey));
	spin_lock(root_lock);
	q->perturbation = nkey;
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;
	struct sk_buff *to_free = NULL;
	struct sk_buff *tail = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	/* slot->allot is a short, make sure quantum is not too big. */
	if (ctl->quantum) {
		unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);

		if (scaled <= 0 || scaled > SHRT_MAX)
			return -EINVAL;
	}

	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit) {
		dropped += sfq_drop(sch, &to_free);
		if (!tail)
			tail = to_free;
	}

	rtnl_kfree_skbs(to_free, tail);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}
static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	q->sch = sch;
	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));

	if (opt) {
		int err = sfq_change(sch, opt);

		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit > 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");