/*
 * net/sched/sch_netem.c	Network emulator
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License.
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

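/* Example usage, via iproute2's tc(8). These invocations are illustrative,
 * not a complete reference:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *		(100ms delay, 10ms jitter, 25% correlated with the
 *		 previous delay)
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *		(0.3% loss, each loss 25% correlated with the previous one)
 */
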
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

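/* A minimal userspace sketch of the same mixing rule (a first-order
 * autoregressive blend of the previous output with fresh randomness),
 * for experimenting outside the kernel. Not part of this file's build;
 * the rho value below is an arbitrary illustrative choice (~75%
 * correlation):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static uint32_t last;
 *
 *	static uint32_t crandom_next(uint32_t rho)
 *	{
 *		uint64_t r = (uint64_t)rho + 1;
 *		uint32_t value = (uint32_t)random();
 *
 *		// new = (1 - rho) * fresh + rho * previous, in 32.32 fixed point
 *		last = (value * ((1ULL << 32) - r) + (uint64_t)last * r) >> 32;
 *		return last;
 *	}
 *
 *	int main(void)
 *	{
 *		int i;
 *
 *		srandom(1);
 *		for (i = 0; i < 8; i++)
 *			printf("%u\n", crandom_next(0xC0000000u));
 *		return 0;
 *	}
 */
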
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}
		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

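/* Illustrative parameter choice (hypothetical numbers; the real netlink
 * attributes carry these probabilities scaled to the full u32 range):
 * with p14 = 0.1%, p13 = 1%, p31 = 30%, p32 = 10% and p23 = 20%, the
 * chain mostly sits in TX_IN_GAP_PERIOD, emits an isolated loss about
 * once per thousand packets (p14), and about once per hundred packets
 * enters a loss burst (p13) that it leaves with probability
 * p31 + p32 = 40% per packet, i.e. a mean burst of ~2.5 packets.
 */
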
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

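/* Worked example for the Simple Gilbert special case (h = 0, so every
 * packet in BAD_STATE is lost, and 1-k = 0, so none is lost in
 * GOOD_STATE): with p = 1% and r = 30%, the stationary probability of
 * BAD_STATE is p / (p + r) ~= 3.2%, the mean loss burst lasts
 * 1 / r ~= 3.3 packets, and the long-run loss rate is likewise ~3.2%.
 */
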
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator;
		 * if it is 1, drop a packet and, if needed, write the event
		 * to the kernel log.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * if it is 1, drop a packet and, if needed, write the event
		 * to the kernel log.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s64 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2 * sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

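/* Worked example (illustrative numbers): with NETEM_DIST_SCALE = 8192,
 * mu = 100ms, sigma = 10ms and a table entry t = 4096 (i.e. +0.5 sigma
 * in table units), the result is 100ms + 4096 * 10ms / 8192 = 105ms.
 * The split into (sigma % NETEM_DIST_SCALE) and (sigma / NETEM_DIST_SCALE)
 * terms above computes t * sigma / NETEM_DIST_SCALE without overflowing,
 * rounding to the nearest unit.
 */
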
static u64 packet_len_2_sched_time(unsigned int len,
				   struct netem_sched_data *q)
{
	u64 offset;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	offset = (u64)len * NSEC_PER_SEC;
	do_div(offset, q->rate);

	return offset;
}

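/* Worked example (illustrative numbers): q->rate = 1000000 bytes/sec,
 * packet_overhead = 0, cell_size = 48, cell_overhead = 5 (ATM-like
 * framing). A 1500 byte packet needs ceil(1500/48) = 32 cells, so len
 * becomes 32 * (48 + 5) = 1696 bytes and the scheduled transmit time is
 * 1696 * NSEC_PER_SEC / 1000000 ~= 1.7ms.
 */
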
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames.
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}
	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}
	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}
	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}
564 tfifo_enqueue(skb, sch);
567 * Do re-ordering by putting one out of N packets at the front
570 cb->time_to_send = ktime_get_ns();
573 netem_enqueue_skb_head(&sch->q, skb);
574 sch->qstats.requeues++;
582 qdisc_skb_cb(segs)->pkt_len = segs->len;
583 last_len = segs->len;
584 rc = qdisc_enqueue(segs, sch, to_free);
585 if (rc != NET_XMIT_SUCCESS) {
586 if (net_xmit_drop_count(rc))
587 qdisc_qstats_drop(sch);
596 qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
598 return NET_XMIT_SUCCESS;
/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */
static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	q->slot.slot_next = now + q->slot_config.min_delay +
		(prandom_u32() *
		 (q->slot_config.max_delay -
		  q->slot_config.min_delay) >> 32);
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}

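/* Worked example (illustrative numbers): min_delay 1ms, max_delay 3ms,
 * max_packets 10, max_bytes 0 (i.e. unlimited, see get_slot() below).
 * Each new slot then opens a uniformly random 1-3ms in the future and
 * lets at most 10 delayed packets through before the next slot is
 * scheduled, roughly mimicking the bursty delivery of shared media
 * such as WiFi aggregation.
 */
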
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		skb = rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}
			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max_t(u64, time_to_send,
						 q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

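/* The table itself is normally generated offline; iproute2 ships
 * distribution files such as "normal" and "pareto" and passes the chosen
 * one in TCA_NETEM_DELAY_DIST, e.g.:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 */
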
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}
	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;
	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;
	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;
	if (q->slot_config.min_delay | q->slot_config.max_delay) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)

MODULE_LICENSE("GPL");