// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"
/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
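/* Userspace example (illustrative; syntax per iproute2's tc-netem(8), not
 * part of this module): the one-liner
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * adds 100ms of delay with +/-10ms of jitter, 25% correlation between
 * successive delay samples, and 0.3% random packet loss.
 */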
struct disttable {
	u32  size;
	s16 table[] __counted_by(size);
};
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss, ecn;
	u32 limit, counter, gap;
	u32 duplicate, reorder, corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct prng {
		u64 seed;
		struct rnd_state prng_state;
	} prng;

	struct disttable *delay_dist;

	enum { CLG_RANDOM, CLG_4_STATES, CLG_GILB_ELL } loss_model;
	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
	} _4_state_model;
	enum { GOOD_STATE = 1, BAD_STATE } GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state, struct prng *p)
{
	u64 value, rho;
	unsigned long answer;
	struct rnd_state *s = &p->prng_state;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32_state(s);

	value = prandom_u32_state(s);
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
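/* Worked example of the blend above (illustrative values): with the
 * correlation scaled so that state->rho = 0x80000000, rho is ~2^31 and
 *
 *	answer = (value * (2^32 - 2^31) + last * 2^31) >> 32
 *	       ~= value/2 + last/2
 *
 * i.e. each output mixes fresh randomness and the previous output in
 * equal parts; rho == 0 degenerates to plain prandom_u32_state().
 */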
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32_state(&q->prng.prng_state);

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}
		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	struct rnd_state *s = &q->prng.prng_state;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32_state(s) < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32_state(s) < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32_state(s) < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32_state(s) > clg->a3)
			return true;
	}

	return false;
}
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
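/* Illustrative configuration from userspace (syntax per tc-netem(8); the
 * exact keywords belong to iproute2, not to this module):
 *
 *	tc qdisc add dev eth0 root netem loss state 1% 10% 70%
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * The first form feeds the p13/p31/p32 (optionally p23/p14) transition
 * probabilities to loss_4state(); the second feeds p/r/1-h/1-k to
 * loss_gilb_ell().
 */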
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     struct prng *prng,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state, prng);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
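/* Worked example (illustrative numbers): with mu = 100ms, sigma = 10ms and
 * no table, the result is uniform over [mu - sigma, mu + sigma). With a
 * table, t = dist->table[rnd % dist->size] is a scaled sample of the target
 * distribution and the split arithmetic above computes
 *
 *	sigma * t / NETEM_DIST_SCALE + mu
 *
 * without overflowing, by handling sigma's quotient and remainder by
 * NETEM_DIST_SCALE separately.
 */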
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
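/* Worked example (illustrative): with q->rate = 125000 bytes/s (1 Mbit/s),
 * len = 1250 bytes and no overhead or cell quantization,
 *
 *	1250 * NSEC_PER_SEC / 125000 = 10,000,000 ns = 10 ms
 *
 * of per-packet transmission time, which netem adds on top of the
 * configured latency/jitter.
 */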
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
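/* Design note: with zero jitter every new skb takes the tail fast path
 * above, the rbtree stays empty and enqueue is O(1); only out-of-order
 * time_to_send values (i.e. jitter) pay for an O(log n) rbtree insertion.
 */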
/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, &q->prng, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}
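/* Illustrative reordering setup from userspace (syntax per tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50% gap 5
 *
 * Packets inside the gap take the 10ms path through the tfifo; once the
 * counter reaches the gap, the next packet is (with the reorder
 * probability) placed at the head of the queue and sent immediately,
 * which is what produces reordering on the wire.
 */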
/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, &q->prng, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	kvfree(d);
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct disttable **tbl, const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	*tbl = d;
	return 0;
}
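/* The tables themselves are built offline and pushed down via netlink;
 * iproute2 ships precomputed normal, pareto and paretonormal tables and a
 * maketable tool for generating one from measured delays, selected e.g. as
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * (illustrative; syntax per tc-netem(8)).
 */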
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}
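/* Illustrative slotting setup from userspace (syntax per tc-netem(8)):
 *
 *	tc qdisc add dev eth0 root netem slot 800us 10ms packets 32 bytes 64k
 *
 * holds packets and releases them in bursts at slot boundaries drawn from
 * [800us, 10ms], capped at 32 packets or 64KB per slot; this approximates
 * shared media such as Wi-Fi or DOCSIS where a sender must wait for a
 * transmit opportunity.
 */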
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
	[TCA_NETEM_PRNG_SEED]	= { .type = NLA_U64 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct disttable *delay_dist = NULL;
	struct disttable *slot_dist = NULL;
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto table_free;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto table_free;
	}

	sch_tree_lock(sch);
	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			q->clg = old_clg;
			goto unlock;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (delay_dist)
		swap(q->delay_dist, delay_dist);
	if (slot_dist)
		swap(q->slot_dist, slot_dist);
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	if (tb[TCA_NETEM_PRNG_SEED])
		q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
	else
		q->prng.seed = get_random_u64();
	prandom_seed_state(&q->prng.prng_state, q->prng.seed);

unlock:
	sch_tree_unlock(sch);

table_free:
	dist_free(delay_dist);
	dist_free(slot_dist);
	return ret;
}
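/* Illustrative: pinning TCA_NETEM_PRNG_SEED makes loss/jitter sequences
 * reproducible across runs (the "seed" keyword needs a recent iproute2):
 *
 *	tc qdisc add dev eth0 root netem loss 1% seed 42
 */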
static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
			      TCA_NETEM_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network characteristics emulator qdisc");