/* net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* Class-Based Queueing (CBQ) algorithm.
   =======================================

   Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
                Management Models for Packet Networks",
                IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

            [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

            [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
                Parameters", 1996

            [4] Sally Floyd and Michael Speer, "Experimental Results
                for Class-Based Queueing", 1998, not published.

   -----------------------------------------------------------------------

   The algorithm skeleton was taken from the NS simulator's cbq.cc.
   Anyone checking this code against the LBL version should take into
   account that ONLY the skeleton was borrowed; the implementation is
   different. In particular:

   --- The WRR algorithm is different. Our version looks more
       reasonable (I hope) and works when quanta are allowed to be
       less than MTU, which is always the case when real-time classes
       have small rates. Note that the statement of [3] is
       incomplete: delay may actually be estimated even if the class
       per-round allotment is less than MTU. Namely, if the per-round
       allotment is W*r_i, and r_1+...+r_k = r < 1, then

       delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

       In the worst case we have an IntServ estimate with D = W*r+k*MTU
       and C = MTU*r. The proof (if correct at all) is trivial.

   --- It seems that cbq-2.0 is not very accurate. At least, I cannot
       interpret some places, which look like wrong translations
       from NS. Anyone is advised to find these differences
       and explain to me why I am wrong 8).

   --- Linux has no EOI event, so we cannot estimate true class
       idle time. The workaround is to treat the next dequeue event
       as a sign that the previous packet has finished. This is wrong
       because of internal device queueing, but on a permanently
       loaded link it is true. Moreover, combined with the clock
       integrator, this scheme looks very close to an ideal solution. */
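
/*
 * Illustrative sketch (never compiled): the worst-case delay bound quoted
 * above, evaluated in integer arithmetic. Everything here is hypothetical:
 * the helper name, the unit choices (bytes, bytes/sec, microseconds) and
 * the reading of [x] as a ceiling are assumptions made for the example.
 */
#if 0
/* delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B */
static unsigned long cbq_delay_bound_us(unsigned long mtu,  /* link MTU, bytes */
                                        unsigned long w_ri, /* W*r_i: per-round allotment of class i, bytes */
                                        unsigned long w_r,  /* W*r: summed allotment of all classes, bytes */
                                        unsigned long k,    /* number of classes */
                                        unsigned long bps)  /* link bandwidth, bytes/sec */
{
        /* [MTU/(W*r_i)]: rounds needed to accumulate one MTU of credit */
        unsigned long rounds = (mtu + w_ri - 1) / w_ri;

        /* the numerator is in bytes; scale to microseconds before dividing
         * by bytes/sec so integer division keeps some precision */
        return (rounds * w_r + w_r + k * mtu) * 1000000UL / bps;
}
#endif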
struct cbq_sched_data;

struct cbq_class {
        struct Qdisc_class_common common;
        struct cbq_class        *next_alive;    /* next class with backlog in this priority band */

/* Parameters */
        unsigned char           priority;       /* class priority */
        unsigned char           priority2;      /* priority to be used after overlimit */
        unsigned char           ewma_log;       /* time constant for idle time calculation */

        /* Link-sharing scheduler parameters */
        long                    maxidle;        /* Class parameters: see below. */
        long                    offtime;
        long                    minidle;
        u32                     avpkt;
        struct qdisc_rate_table *R_tab;

        /* General scheduler (WRR) parameters */
        long                    allot;
        long                    quantum;        /* Allotment per WRR round */
        long                    weight;         /* Relative allotment: see below */

        struct Qdisc            *qdisc;         /* Ptr to CBQ discipline */
        struct cbq_class        *split;         /* Ptr to split node */
        struct cbq_class        *share;         /* Ptr to LS parent in the class tree */
        struct cbq_class        *tparent;       /* Ptr to tree parent in the class tree */
        struct cbq_class        *borrow;        /* NULL if class is bandwidth limited;
                                                   parent otherwise */
        struct cbq_class        *sibling;       /* Sibling chain */
        struct cbq_class        *children;      /* Pointer to children chain */

        struct Qdisc            *q;             /* Elementary queueing discipline */

/* Variables */
        unsigned char           cpriority;      /* Effective priority */
        unsigned char           delayed;
        unsigned char           level;          /* level of the class in hierarchy:
                                                   0 for leaf classes, and maximal
                                                   level of children + 1 for nodes.
                                                 */
        psched_time_t           last;           /* Last end of service */
        psched_time_t           undertime;
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tc_cbq_xstats    xstats;

        struct tcf_proto __rcu  *filter_list;
        struct tcf_block        *block;

        int                     filters;

        struct cbq_class        *defaults[TC_PRIO_MAX + 1];
};
struct cbq_sched_data {
        struct Qdisc_class_hash clhash;                 /* Hash table of all classes */
        int                     nclasses[TC_CBQ_MAXPRIO + 1];
        unsigned int            quanta[TC_CBQ_MAXPRIO + 1];

        struct cbq_class        link;

        unsigned int            activemask;
        struct cbq_class        *active[TC_CBQ_MAXPRIO + 1];    /* List of all classes
                                                                   with backlog */

#ifdef CONFIG_NET_CLS_ACT
        struct cbq_class        *rx_class;
#endif
        struct cbq_class        *tx_class;
        struct cbq_class        *tx_borrowed;
        int                     tx_len;
        psched_time_t           now;            /* Cached timestamp */
        unsigned int            pmask;

        struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
                                                   transmit just now */
        psched_tdiff_t          wd_expires;
        int                     toplevel;
        u32                     hgenerator;
};
#define L2T(cl, len)    qdisc_l2t((cl)->R_tab, len)
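
/*
 * Conceptual sketch (never compiled) of what a rate-table lookup does:
 * the table maps a packet size, quantized into 2^cell_log byte cells, to
 * the transmission time in scheduler ticks at the configured rate. The
 * helper below is hypothetical; the real qdisc_l2t() also accounts for
 * overhead and for slots beyond the end of the 256-entry table.
 */
#if 0
static inline u32 l2t_sketch(const u32 *data, int cell_log, unsigned int len)
{
        unsigned int slot = len >> cell_log;    /* which size cell? */

        if (slot > 255)                         /* clamp oversized packets */
                slot = 255;
        return data[slot];                      /* precomputed ticks */
}
#endif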
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct cbq_class, common);
}
#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
        struct cbq_class *cl;

        for (cl = this->tparent; cl; cl = cl->tparent) {
                struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

                if (new != NULL && new != this)
                        return new;
        }
        return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *head = &q->link;
        struct cbq_class **defmap;
        struct cbq_class *cl = NULL;
        u32 prio = skb->priority;
        struct tcf_proto *fl;
        struct tcf_result res;

        /*
         * Step 1. If skb->priority points to one of our classes, use it.
         */
        if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
            (cl = cbq_class_lookup(q, prio)) != NULL)
                return cl;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        for (;;) {
                int result = 0;

                defmap = head->defaults;

                fl = rcu_dereference_bh(head->filter_list);
                /*
                 * Step 2+n. Apply classifier.
                 */
                result = tcf_classify(skb, fl, &res, true);
                if (!fl || result < 0)
                        goto fallback;

                cl = (void *)res.class;
                if (!cl) {
                        if (TC_H_MAJ(res.classid))
                                cl = cbq_class_lookup(q, res.classid);
                        else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];

                        if (cl == NULL)
                                goto fallback;
                }
                if (cl->level >= head->level)
                        goto fallback;
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
                        return cbq_reclassify(skb, cl);
                }
#endif
                if (cl->level == 0)
                        return cl;

                /*
                 * Step 3+n. If the classifier selected a link-sharing class,
                 *           apply the agency-specific classifier.
                 *           Repeat this procedure until we hit a leaf node.
                 */
                head = cl;
        }

fallback:
        cl = head;

        /*
         * Step 4. No success...
         */
        if (TC_H_MAJ(prio) == 0 &&
            !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
            !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
                return head;

        return cl;
}
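
/*
 * A minimal sketch (never compiled) of the defmap fallback used in Step 2+n
 * above: when the classifier returns a logical priority instead of a class
 * handle, the split node's defaults[] resolves it, falling back to the
 * best-effort slot. The helper name is hypothetical.
 */
#if 0
static struct cbq_class *resolve_default(struct cbq_class *split, u32 res_classid)
{
        struct cbq_class *cl = split->defaults[res_classid & TC_PRIO_MAX];

        return cl ? cl : split->defaults[TC_PRIO_BESTEFFORT];
}
#endif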
/*
 * A packet has just been enqueued to an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */
static inline void cbq_activate_class(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        int prio = cl->cpriority;
        struct cbq_class *cl_tail;

        cl_tail = q->active[prio];
        q->active[prio] = cl;

        if (cl_tail != NULL) {
                cl->next_alive = cl_tail->next_alive;
                cl_tail->next_alive = cl;
        } else {
                cl->next_alive = cl;
                q->activemask |= (1<<prio);
        }
}
/*
 * Unlink class from the active chain.
 * Note that the same operation is performed directly in cbq_dequeue*
 * during the round-robin pass.
 */
static void cbq_deactivate_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        int prio = this->cpriority;
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];

        do {
                cl = cl_prev->next_alive;
                if (cl == this) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        q->activemask &= ~(1<<prio);
                                        return;
                                }
                        }
                        return;
                }
        } while ((cl_prev = cl) != q->active[prio]);
}
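
/*
 * The list surgery above in miniature (never compiled): the active list is
 * circular and singly linked, and q->active[prio] points at the TAIL, so
 * tail->next_alive is the head. A sketch of removal under that invariant,
 * with hypothetical types; it returns the (possibly new) tail, or NULL
 * when the band becomes empty.
 */
#if 0
struct node { struct node *next; };

static struct node *circ_remove(struct node *tail, struct node *victim)
{
        struct node *prev = tail;

        do {
                struct node *cur = prev->next;

                if (cur == victim) {
                        prev->next = cur->next;
                        cur->next = NULL;
                        if (cur == tail)        /* removed the tail itself */
                                return (prev == cur) ? NULL : prev;
                        return tail;
                }
                prev = cur;
        } while (prev != tail);
        return tail;                            /* victim was not on the list */
}
#endif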
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
        int toplevel = q->toplevel;

        if (toplevel > cl->level) {
                psched_time_t now = psched_get_time();

                do {
                        if (cl->undertime < now) {
                                q->toplevel = cl->level;
                                return;
                        }
                } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
        }
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
            struct sk_buff **to_free)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        int uninitialized_var(ret);
        struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
        q->rx_class = cl;
#endif
        if (cl == NULL) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }

        ret = qdisc_enqueue(skb, cl->q, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                cbq_mark_toplevel(q, cl);
                if (!cl->next_alive)
                        cbq_activate_class(cl);
                return ret;
        }

        if (net_xmit_drop_count(ret)) {
                qdisc_qstats_drop(sch);
                cbq_mark_toplevel(q, cl);
                cl->qstats.drops++;
        }
        return ret;
}
/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        psched_tdiff_t delay = cl->undertime - q->now;

        if (!cl->delayed) {
                delay += cl->offtime;

                /*
                 * The class goes to sleep, so that it will have no
                 * chance to work avgidle. Let's forgive it 8)
                 *
                 * BTW cbq-2.0 has a bug here: apparently they forgot
                 * to shift this term by cl->ewma_log.
                 */
                if (cl->avgidle < 0)
                        delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
                if (cl->avgidle < cl->minidle)
                        cl->avgidle = cl->minidle;
                if (delay <= 0)
                        delay = 1;
                cl->undertime = q->now + delay;

                cl->xstats.overactions++;
                cl->delayed = 1;
        }
        if (q->wd_expires == 0 || q->wd_expires > delay)
                q->wd_expires = delay;

        /* Dirty work! We must schedule wakeups based on the
         * real available rate, rather than the leaf rate,
         * which may be tiny (even zero).
         */
        if (q->toplevel == TC_CBQ_MAXLEVEL) {
                struct cbq_class *b;
                psched_tdiff_t base_delay = q->wd_expires;

                for (b = cl->borrow; b; b = b->borrow) {
                        delay = b->undertime - q->now;
                        if (delay < base_delay) {
                                if (delay <= 0)
                                        delay = 1;
                                base_delay = delay;
                        }
                }

                q->wd_expires = base_delay;
        }
}
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
                                       psched_time_t now)
{
        struct cbq_class *cl;
        struct cbq_class *cl_prev = q->active[prio];
        psched_time_t sched = now;

        if (cl_prev == NULL)
                return 0;

        do {
                cl = cl_prev->next_alive;
                if (now - cl->penalized > 0) {
                        cl_prev->next_alive = cl->next_alive;
                        cl->next_alive = NULL;
                        cl->cpriority = cl->priority;
                        cl->delayed = 0;
                        cbq_activate_class(cl);

                        if (cl == q->active[prio]) {
                                q->active[prio] = cl_prev;
                                if (cl == q->active[prio]) {
                                        q->active[prio] = NULL;
                                        return 0;
                                }
                        }

                        cl = cl_prev->next_alive;
                } else if (sched - cl->penalized > 0)
                        sched = cl->penalized;
        } while ((cl_prev = cl) != q->active[prio]);

        return sched - now;
}
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
        struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
                                                delay_timer);
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
        unsigned int pmask;

        now = psched_get_time();

        pmask = q->pmask;
        q->pmask = 0;

        while (pmask) {
                int prio = ffz(~pmask);
                psched_tdiff_t tmp;

                pmask &= ~(1<<prio);

                tmp = cbq_undelay_prio(q, prio, now);
                if (tmp > 0) {
                        q->pmask |= 1<<prio;
                        if (tmp < delay || delay == 0)
                                delay = tmp;
                }
        }

        if (delay) {
                ktime_t time;

                time = 0;
                time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
                hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
        }

        __netif_schedule(qdisc_root(sch));
        return HRTIMER_NORESTART;
}
/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */
static void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
                    struct cbq_class *borrowed)
{
        if (cl && q->toplevel >= borrowed->level) {
                if (cl->q->q.qlen > 1) {
                        do {
                                if (borrowed->undertime == PSCHED_PASTPERFECT) {
                                        q->toplevel = borrowed->level;
                                        return;
                                }
                        } while ((borrowed = borrowed->borrow) != NULL);
                }
#if 0
        /* It is not necessary now. Uncommenting it
           will save CPU cycles, but decrease fairness.

                q->toplevel = TC_CBQ_MAXLEVEL;
         */
#endif
        }
}
static void
cbq_update(struct cbq_sched_data *q)
{
        struct cbq_class *this = q->tx_class;
        struct cbq_class *cl = this;
        int len = q->tx_len;
        psched_time_t now;

        q->tx_class = NULL;
        /* Time integrator. We calculate EOS time
         * by adding the expected packet transmission time.
         */
        now = q->now + L2T(&q->link, len);

        for ( ; cl; cl = cl->share) {
                long avgidle = cl->avgidle;
                long idle;

                cl->bstats.packets++;
                cl->bstats.bytes += len;

                /*
                 * (now - last) is the total time between packet right edges.
                 * (last_pktlen/rate) is the "virtual" busy time, so that
                 *
                 *      idle = (now - last) - last_pktlen/rate
                 */
                idle = now - cl->last;
                if ((unsigned long)idle > 128*1024*1024) {
                        avgidle = cl->maxidle;
                } else {
                        idle -= L2T(cl, len);

                        /* true_avgidle := (1-W)*true_avgidle + W*idle,
                         * where W=2^{-ewma_log}. But cl->avgidle is scaled:
                         * cl->avgidle == true_avgidle/W,
                         * hence:
                         */
                        avgidle += idle - (avgidle>>cl->ewma_log);
                }

                if (avgidle <= 0) {
                        /* Overlimit or at-limit */
                        if (avgidle < cl->minidle)
                                avgidle = cl->minidle;

                        cl->avgidle = avgidle;

                        /* Calculate the expected time when this class
                         * will be allowed to send.
                         * It will occur when:
                         * (1-W)*true_avgidle + W*delay = 0, i.e.
                         * idle = (1/W - 1)*(-true_avgidle)
                         * or
                         * idle = (1 - W)*(-cl->avgidle);
                         */
                        idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

                        /*
                         * That is not all.
                         * To maintain the rate allocated to the class,
                         * we add to undertime the virtual clock time
                         * necessary to complete the transmitted packet.
                         * (len/phys_bandwidth has already elapsed by
                         * the moment of cbq_update)
                         */
                        idle -= L2T(&q->link, len);
                        idle += L2T(cl, len);

                        cl->undertime = now + idle;
                } else {
                        /* Underlimit */
                        cl->undertime = PSCHED_PASTPERFECT;
                        if (avgidle > cl->maxidle)
                                cl->avgidle = cl->maxidle;
                        else
                                cl->avgidle = avgidle;
                }
                if ((s64)(now - cl->last) > 0)
                        cl->last = now;
        }

        cbq_update_toplevel(q, this, q->tx_borrowed);
}
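
/*
 * A minimal sketch (never compiled) of the scaled EWMA update used above,
 * with a worked example. true_avgidle follows
 *
 *      true_avgidle := (1-W)*true_avgidle + W*idle,    W = 2^-ewma_log,
 *
 * but the code stores avgidle = true_avgidle/W, so the update becomes
 * avgidle += idle - (avgidle >> ewma_log). The helper name is hypothetical.
 */
#if 0
static long ewma_scaled_update(long avgidle, long idle, int ewma_log)
{
        return avgidle + idle - (avgidle >> ewma_log);
}
/*
 * E.g. with ewma_log = 2 (W = 1/4), avgidle = 40 (true value 10) and a
 * measured idle of 8: 40 + 8 - (40 >> 2) = 38, i.e. true_avgidle moves
 * from 10 to 9.5, exactly 0.75*10 + 0.25*8.
 */
#endif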
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *this_cl = cl;

        if (cl->tparent == NULL)
                return cl;

        if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
                cl->delayed = 0;
                return cl;
        }

        do {
                /* This is a very suspicious place. Nowadays the overlimit
                 * action is generated for non-bounded classes
                 * only if the link is completely congested.
                 * Though this agrees with the ancestor-only paradigm,
                 * it looks very stupid. In particular,
                 * it means that this chunk of code will either
                 * never be called or will result in strong amplification
                 * of burstiness. Dangerous, silly, and, alas,
                 * no other solution exists.
                 */
                cl = cl->borrow;
                if (!cl) {
                        this_cl->qstats.overlimits++;
                        cbq_overlimit(this_cl);
                        return NULL;
                }
                if (cl->level > q->toplevel)
                        return NULL;
        } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

        cl->delayed = 0;
        return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl_tail, *cl_prev, *cl;
        struct sk_buff *skb;
        int deficit;

        cl_tail = cl_prev = q->active[prio];
        cl = cl_prev->next_alive;

        do {
                deficit = 0;

                /* Start round */
                do {
                        struct cbq_class *borrow = cl;

                        if (cl->q->q.qlen &&
                            (borrow = cbq_under_limit(cl)) == NULL)
                                goto skip_class;

                        if (cl->deficit <= 0) {
                                /* Class exhausted its allotment per
                                 * this round. Switch to the next one.
                                 */
                                deficit = 1;
                                cl->deficit += cl->quantum;
                                goto next_class;
                        }

                        skb = cl->q->dequeue(cl->q);

                        /* Class did not give us any skb :-(
                         * It could occur even if cl->q->q.qlen != 0,
                         * e.g. if cl->q is "tbf"
                         */
                        if (skb == NULL)
                                goto skip_class;

                        cl->deficit -= qdisc_pkt_len(skb);
                        q->tx_class = cl;
                        q->tx_borrowed = borrow;
                        if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
                                borrow->xstats.borrows++;
                                cl->xstats.borrows++;
#else
                                borrow->xstats.borrows += qdisc_pkt_len(skb);
                                cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
                        }
                        q->tx_len = qdisc_pkt_len(skb);

                        if (cl->deficit <= 0) {
                                q->active[prio] = cl;
                                cl = cl->next_alive;
                                cl->deficit += cl->quantum;
                        }
                        return skb;

skip_class:
                        if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
                                /* Class is empty or penalized.
                                 * Unlink it from the active chain.
                                 */
                                cl_prev->next_alive = cl->next_alive;
                                cl->next_alive = NULL;

                                /* Did cl_tail point to it? */
                                if (cl == cl_tail) {
                                        /* Repair it! */
                                        cl_tail = cl_prev;

                                        /* Was it the last class in this band? */
                                        if (cl == cl_tail) {
                                                /* Kill the band! */
                                                q->active[prio] = NULL;
                                                q->activemask &= ~(1<<prio);
                                                if (cl->q->q.qlen)
                                                        cbq_activate_class(cl);
                                                return NULL;
                                        }

                                        q->active[prio] = cl_tail;
                                }
                                if (cl->q->q.qlen)
                                        cbq_activate_class(cl);

                                cl = cl_prev;
                        }

next_class:
                        cl_prev = cl;
                        cl = cl->next_alive;
                } while (cl_prev != cl_tail);
        } while (deficit);

        q->active[prio] = cl_prev;

        return NULL;
}
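
/*
 * The per-band WRR/DRR step above in miniature (never compiled): a class
 * spends its deficit, is recharged by its quantum when exhausted, and the
 * round moves on to the next class. The types and helper are hypothetical
 * and assume quantum > 0; the real code also handles borrowing and the
 * active-list surgery.
 */
#if 0
struct toy_class {
        struct toy_class *next;
        long deficit, quantum;
        unsigned int head_len;          /* length of the packet at queue head */
};

static struct toy_class *drr_pick(struct toy_class *cl)
{
        for (;;) {
                if (cl->deficit <= 0) {                 /* allotment exhausted: */
                        cl->deficit += cl->quantum;     /* recharge, and */
                        cl = cl->next;                  /* try the next class */
                        continue;
                }
                cl->deficit -= cl->head_len;            /* send one packet */
                return cl;
        }
}
#endif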
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int activemask;

        activemask = q->activemask & 0xFF;
        while (activemask) {
                int prio = ffz(~activemask);

                activemask &= ~(1<<prio);
                skb = cbq_dequeue_prio(sch, prio);
                if (skb)
                        return skb;
        }
        return NULL;
}
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct cbq_sched_data *q = qdisc_priv(sch);
        psched_time_t now;

        now = psched_get_time();

        if (q->tx_class)
                cbq_update(q);

        q->now = now;

        for (;;) {
                q->wd_expires = 0;

                skb = cbq_dequeue_1(sch);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }

                /* All the classes are overlimit.
                 *
                 * This is possible if:
                 * 1. The scheduler is empty.
                 * 2. The toplevel cutoff inhibited borrowing.
                 * 3. The root class is overlimit.
                 *
                 * Reset conditions 2 and 3 and retry.
                 *
                 * Note that NS and cbq-2.0 are buggy: peeking at an
                 * arbitrary class is appropriate for ancestor-only
                 * sharing, but not for the toplevel algorithm.
                 *
                 * Our version is better, but slower, because it requires
                 * two passes; that is unavoidable with top-level sharing.
                 */
                if (q->toplevel == TC_CBQ_MAXLEVEL &&
                    q->link.undertime == PSCHED_PASTPERFECT)
                        break;

                q->toplevel = TC_CBQ_MAXLEVEL;
                q->link.undertime = PSCHED_PASTPERFECT;
        }

        /* No packets in the scheduler, or nobody wants to give them to us :-(
         * Sigh... start the watchdog timer in the latter case.
         */
        if (sch->q.qlen) {
                qdisc_qstats_overlimit(sch);
                if (q->wd_expires)
                        qdisc_watchdog_schedule(&q->watchdog,
                                                now + q->wd_expires);
        }
        return NULL;
}
/* CBQ class maintenance routines */
static void cbq_adjust_levels(struct cbq_class *this)
{
        if (this == NULL)
                return;

        do {
                int level = 0;
                struct cbq_class *cl;

                if ((cl = this->children) != NULL) {
                        do {
                                if (cl->level > level)
                                        level = cl->level;
                        } while ((cl = cl->sibling) != this->children);
                }
                this->level = level + 1;
        } while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
        struct cbq_class *cl;
        unsigned int h;

        if (q->quanta[prio] == 0)
                return;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        /* BEWARE! This expression can suffer from
                         * arithmetic overflow!
                         */
                        if (cl->priority == prio) {
                                cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                        q->quanta[prio];
                        }
                        if (cl->quantum <= 0 ||
                            cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
                                pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
                                        cl->common.classid, cl->quantum);
                                cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                        }
                }
        }
}
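
/*
 * The overflow warned about above comes from the three-term product
 * weight*allot*nclasses in a long. A sketch (never compiled) of an
 * overflow-safer variant using 64-bit intermediates; cbq_safe_quantum()
 * is hypothetical, not kernel API, and omits the repair/clamp step.
 */
#if 0
static long cbq_safe_quantum(long weight, long allot,
                             int nclasses, unsigned int quanta_sum)
{
        u64 tmp = (u64)weight * (u64)allot;

        tmp *= (u32)nclasses;
        do_div(tmp, quanta_sum);        /* 64/32 divide, as the kernel requires */
        return (long)tmp;
}
#endif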
static void cbq_sync_defmap(struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = cl->split;
        unsigned int h;
        int i;

        if (split == NULL)
                return;

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
                        split->defaults[i] = NULL;
        }

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                int level = split->level;

                if (split->defaults[i])
                        continue;

                for (h = 0; h < q->clhash.hashsize; h++) {
                        struct cbq_class *c;

                        hlist_for_each_entry(c, &q->clhash.hash[h],
                                             common.hnode) {
                                if (c->split == split && c->level < level &&
                                    c->defmap & (1<<i)) {
                                        split->defaults[i] = c;
                                        level = c->level;
                                }
                        }
                }
        }
}
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
        struct cbq_class *split = NULL;

        if (splitid == 0) {
                split = cl->split;
                if (!split)
                        return;
                splitid = split->common.classid;
        }

        if (split == NULL || split->common.classid != splitid) {
                for (split = cl->tparent; split; split = split->tparent)
                        if (split->common.classid == splitid)
                                break;
        }

        if (split == NULL)
                return;

        if (cl->split != split) {
                cl->defmap = 0;
                cbq_sync_defmap(cl);
                cl->split = split;
                cl->defmap = def & mask;
        } else
                cl->defmap = (cl->defmap & ~mask) | (def & mask);

        cbq_sync_defmap(cl);
}
static void cbq_unlink_class(struct cbq_class *this)
{
        struct cbq_class *cl, **clp;
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);

        qdisc_class_hash_remove(&q->clhash, &this->common);

        if (this->tparent) {
                clp = &this->sibling;
                cl = *clp;
                do {
                        if (cl == this) {
                                *clp = cl->sibling;
                                break;
                        }
                        clp = &cl->sibling;
                } while ((cl = *clp) != this->sibling);

                if (this->tparent->children == this) {
                        this->tparent->children = this->sibling;
                        if (this->sibling == this)
                                this->tparent->children = NULL;
                }
        } else {
                WARN_ON(this->sibling != this);
        }
}
static void cbq_link_class(struct cbq_class *this)
{
        struct cbq_sched_data *q = qdisc_priv(this->qdisc);
        struct cbq_class *parent = this->tparent;

        this->sibling = this;
        qdisc_class_hash_insert(&q->clhash, &this->common);

        if (parent == NULL)
                return;

        if (parent->children == NULL) {
                parent->children = this;
        } else {
                this->sibling = parent->children->sibling;
                parent->children->sibling = this;
        }
}
static void
cbq_reset(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        int prio;
        unsigned int h;

        q->activemask = 0;
        q->pmask = 0;
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
        hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
                q->active[prio] = NULL;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        qdisc_reset(cl->q);

                        cl->next_alive = NULL;
                        cl->undertime = PSCHED_PASTPERFECT;
                        cl->avgidle = cl->maxidle;
                        cl->deficit = cl->quantum;
                        cl->cpriority = cl->priority;
                }
        }
        sch->q.qlen = 0;
}
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
        if (lss->change & TCF_CBQ_LSS_FLAGS) {
                cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
                cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
        }
        if (lss->change & TCF_CBQ_LSS_EWMA)
                cl->ewma_log = lss->ewma_log;
        if (lss->change & TCF_CBQ_LSS_AVPKT)
                cl->avpkt = lss->avpkt;
        if (lss->change & TCF_CBQ_LSS_MINIDLE)
                cl->minidle = -(long)lss->minidle;
        if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
                cl->maxidle = lss->maxidle;
                cl->avgidle = lss->maxidle;
        }
        if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
        return 0;
}
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]--;
        q->quanta[cl->priority] -= cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
        q->nclasses[cl->priority]++;
        q->quanta[cl->priority] += cl->weight;
        cbq_normalize_quanta(q, cl->priority);
}
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
        struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

        if (wrr->allot)
                cl->allot = wrr->allot;
        if (wrr->weight)
                cl->weight = wrr->weight;
        if (wrr->priority) {
                cl->priority = wrr->priority - 1;
                cl->cpriority = cl->priority;
                if (cl->priority >= cl->priority2)
                        cl->priority2 = TC_CBQ_MAXPRIO - 1;
        }

        cbq_addprio(q, cl);
        return 0;
}
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
        cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
        return 0;
}
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_LSSOPT]        = { .len = sizeof(struct tc_cbq_lssopt) },
        [TCA_CBQ_WRROPT]        = { .len = sizeof(struct tc_cbq_wrropt) },
        [TCA_CBQ_FOPT]          = { .len = sizeof(struct tc_cbq_fopt) },
        [TCA_CBQ_OVL_STRATEGY]  = { .len = sizeof(struct tc_cbq_ovl) },
        [TCA_CBQ_RATE]          = { .len = sizeof(struct tc_ratespec) },
        [TCA_CBQ_RTAB]          = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct tc_ratespec *r;
        int err;

        qdisc_watchdog_init(&q->watchdog, sch);
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;

        if (!opt) {
                NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
                NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
                return -EINVAL;
        }

        r = nla_data(tb[TCA_CBQ_RATE]);

        q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
        if (!q->link.R_tab)
                return -EINVAL;

        err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
        if (err)
                goto put_rtab;

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                goto put_block;

        q->link.sibling = &q->link;
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
        q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                      sch->handle, NULL);
        if (!q->link.q)
                q->link.q = &noop_qdisc;
        else
                qdisc_hash_add(q->link.q, true);

        q->link.priority = TC_CBQ_MAXPRIO - 1;
        q->link.priority2 = TC_CBQ_MAXPRIO - 1;
        q->link.cpriority = TC_CBQ_MAXPRIO - 1;
        q->link.allot = psched_mtu(qdisc_dev(sch));
        q->link.quantum = q->link.allot;
        q->link.weight = q->link.R_tab->rate.rate;

        q->link.ewma_log = TC_CBQ_DEF_EWMA;
        q->link.avpkt = q->link.allot/2;
        q->link.minidle = -0x7FFFFFFF;

        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();

        cbq_link_class(&q->link);

        if (tb[TCA_CBQ_LSSOPT])
                cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

        cbq_addprio(q, &q->link);
        return 0;

put_block:
        tcf_block_put(q->link.block);

put_rtab:
        qdisc_put_rtab(q->link.R_tab);
        return err;
}
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);

        if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_lssopt opt;

        opt.flags = 0;
        if (cl->borrow == NULL)
                opt.flags |= TCF_CBQ_LSS_BOUNDED;
        if (cl->share == NULL)
                opt.flags |= TCF_CBQ_LSS_ISOLATED;
        opt.ewma_log = cl->ewma_log;
        opt.level = cl->level;
        opt.avpkt = cl->avpkt;
        opt.maxidle = cl->maxidle;
        opt.minidle = (u32)(-cl->minidle);
        opt.offtime = cl->offtime;
        opt.change = ~0;
        if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_wrropt opt;

        memset(&opt, 0, sizeof(opt));
        opt.flags = 0;
        opt.allot = cl->allot;
        opt.priority = cl->priority + 1;
        opt.cpriority = cl->cpriority + 1;
        opt.weight = cl->weight;
        if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cbq_fopt opt;

        if (cl->split || cl->defmap) {
                opt.split = cl->split ? cl->split->common.classid : 0;
                opt.defmap = cl->defmap;
                opt.defchange = ~0;
                if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
                        goto nla_put_failure;
        }
        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
        if (cbq_dump_lss(skb, cl) < 0 ||
            cbq_dump_rate(skb, cl) < 0 ||
            cbq_dump_wrr(skb, cl) < 0 ||
            cbq_dump_fopt(skb, cl) < 0)
                return -1;
        return 0;
}
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, &q->link) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        q->link.xstats.avgidle = q->link.avgidle;
        return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
               struct sk_buff *skb, struct tcmsg *tcm)
{
        struct cbq_class *cl = (struct cbq_class *)arg;
        struct nlattr *nest;

        if (cl->tparent)
                tcm->tcm_parent = cl->tparent->common.classid;
        else
                tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        tcm->tcm_info = cl->q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, cl) < 0)
                goto nla_put_failure;
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                     struct gnet_dump *d)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;

        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;

        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                        cl->common.classid, extack);
                if (new == NULL)
                        return -ENOBUFS;
        }

        *old = qdisc_replace(sch, new, &cl->q);
        return 0;
}
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        return cl->q;
}
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        cbq_deactivate_class(cl);
}
static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        return (unsigned long)cbq_class_lookup(q, classid);
}
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
        struct cbq_sched_data *q = qdisc_priv(sch);

        WARN_ON(cl->filters);

        tcf_block_put(cl->block);
        qdisc_destroy(cl->q);
        qdisc_put_rtab(cl->R_tab);
        gen_kill_estimator(&cl->rate_est);
        if (cl != &q->link)
                kfree(cl);
}
static void cbq_destroy(struct Qdisc *sch)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct hlist_node *next;
        struct cbq_class *cl;
        unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
        q->rx_class = NULL;
#endif
        /*
         * Filters must be destroyed first because we don't destroy the
         * classes from root to leaves, which means that filters can still
         * be bound to classes which have been destroyed already. --TGR '04
         */
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        tcf_block_put(cl->block);
                        cl->block = NULL;
                }
        }
        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
                                          common.hnode)
                        cbq_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
                 unsigned long *arg, struct netlink_ext_ack *extack)
{
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)*arg;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_CBQ_MAX + 1];
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;

        if (!opt) {
                NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;

        if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
                NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
                return -EOPNOTSUPP;
        }

        if (cl) {
                /* Check parent */
                if (parentid) {
                        if (cl->tparent &&
                            cl->tparent->common.classid != parentid) {
                                NL_SET_ERR_MSG(extack, "Invalid parent id");
                                return -EINVAL;
                        }
                        if (!cl->tparent && parentid != TC_H_ROOT) {
                                NL_SET_ERR_MSG(extack, "Parent must be root");
                                return -EINVAL;
                        }
                }

                if (tb[TCA_CBQ_RATE]) {
                        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
                                              tb[TCA_CBQ_RTAB], extack);
                        if (rtab == NULL)
                                return -EINVAL;
                }

                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    NULL,
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err) {
                                NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
                                qdisc_put_rtab(rtab);
                                return err;
                        }
                }

                /* Change class parameters */
                sch_tree_lock(sch);

                if (cl->next_alive != NULL)
                        cbq_deactivate_class(cl);

                if (rtab) {
                        qdisc_put_rtab(cl->R_tab);
                        cl->R_tab = rtab;
                }

                if (tb[TCA_CBQ_LSSOPT])
                        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

                if (tb[TCA_CBQ_WRROPT]) {
                        cbq_rmprio(q, cl);
                        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
                }

                if (tb[TCA_CBQ_FOPT])
                        cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

                if (cl->q->q.qlen)
                        cbq_activate_class(cl);

                sch_tree_unlock(sch);

                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EINVAL;

        if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
                NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
                return -EINVAL;
        }

        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
                              extack);
        if (rtab == NULL)
                return -EINVAL;

        if (classid) {
                err = -EINVAL;
                if (TC_H_MAJ(classid ^ sch->handle) ||
                    cbq_class_lookup(q, classid)) {
                        NL_SET_ERR_MSG(extack, "Specified class not found");
                        goto failure;
                }
        } else {
                int i;

                classid = TC_H_MAKE(sch->handle, 0x8000);

                for (i = 0; i < 0x8000; i++) {
                        if (++q->hgenerator >= 0x8000)
                                q->hgenerator = 1;
                        if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
                                break;
                }
                err = -ENOSR;
                if (i >= 0x8000) {
                        NL_SET_ERR_MSG(extack, "Unable to generate classid");
                        goto failure;
                }
                classid = classid|q->hgenerator;
        }

        parent = &q->link;
        if (parentid) {
                parent = cbq_class_lookup(q, parentid);
                err = -EINVAL;
                if (!parent) {
                        NL_SET_ERR_MSG(extack, "Failed to find parentid");
                        goto failure;
                }
        }

        err = -ENOBUFS;
        cl = kzalloc(sizeof(*cl), GFP_KERNEL);
        if (cl == NULL)
                goto failure;

        err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
                return err;
        }

        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
                                        NULL,
                                        qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
                        tcf_block_put(cl->block);
                        kfree(cl);
                        goto failure;
                }
        }

        cl->R_tab = rtab;
        rtab = NULL;
        cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
                                  NULL);
        if (!cl->q)
                cl->q = &noop_qdisc;
        else
                qdisc_hash_add(cl->q, true);

        cl->common.classid = classid;
        cl->tparent = parent;
        cl->qdisc = sch;
        cl->allot = parent->allot;
        cl->quantum = cl->allot;
        cl->weight = cl->R_tab->rate.rate;

        sch_tree_lock(sch);
        cbq_link_class(cl);
        cl->borrow = cl->tparent;
        if (cl->tparent != &q->link)
                cl->share = cl->tparent;
        cbq_adjust_levels(parent);
        cl->minidle = -0x7FFFFFFF;
        cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
        cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
        if (cl->ewma_log == 0)
                cl->ewma_log = q->link.ewma_log;
        if (cl->maxidle == 0)
                cl->maxidle = q->link.maxidle;
        if (cl->avpkt == 0)
                cl->avpkt = q->link.avpkt;
        if (tb[TCA_CBQ_FOPT])
                cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;

failure:
        qdisc_put_rtab(rtab);
        return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
        unsigned int qlen, backlog;

        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;

        sch_tree_lock(sch);

        qlen = cl->q->q.qlen;
        backlog = cl->q->qstats.backlog;
        qdisc_reset(cl->q);
        qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

        if (cl->next_alive)
                cbq_deactivate_class(cl);

        if (q->tx_borrowed == cl)
                q->tx_borrowed = q->tx_class;
        if (q->tx_class == cl) {
                q->tx_class = NULL;
                q->tx_borrowed = NULL;
        }
#ifdef CONFIG_NET_CLS_ACT
        if (q->rx_class == cl)
                q->rx_class = NULL;
#endif

        cbq_unlink_class(cl);
        cbq_adjust_levels(cl->tparent);
        cl->defmap = 0;
        cbq_sync_defmap(cl);

        cbq_rmprio(q, cl);
        sch_tree_unlock(sch);

        cbq_destroy_class(sch, cl);
        return 0;
}
static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
                                       struct netlink_ext_ack *extack)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;

        if (cl == NULL)
                cl = &q->link;

        return cl->block;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *p = (struct cbq_class *)parent;
        struct cbq_class *cl = cbq_class_lookup(q, classid);

        if (cl) {
                if (p && p->level <= cl->level)
                        return 0;
                cl->filters++;
                return (unsigned long)cl;
        }
        return 0;
}
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
        struct cbq_class *cl = (struct cbq_class *)arg;

        cl->filters--;
}
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl;
        unsigned int h;

        if (arg->stop)
                return;

        for (h = 0; h < q->clhash.hashsize; h++) {
                hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}
static const struct Qdisc_class_ops cbq_class_ops = {
        .graft          =       cbq_graft,
        .leaf           =       cbq_leaf,
        .qlen_notify    =       cbq_qlen_notify,
        .find           =       cbq_find,
        .change         =       cbq_change_class,
        .delete         =       cbq_delete,
        .walk           =       cbq_walk,
        .tcf_block      =       cbq_tcf_block,
        .bind_tcf       =       cbq_bind_filter,
        .unbind_tcf     =       cbq_unbind_filter,
        .dump           =       cbq_dump_class,
        .dump_stats     =       cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
        .cl_ops         =       &cbq_class_ops,
        .id             =       "cbq",
        .priv_size      =       sizeof(struct cbq_sched_data),
        .enqueue        =       cbq_enqueue,
        .dequeue        =       cbq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       cbq_init,
        .reset          =       cbq_reset,
        .destroy        =       cbq_destroy,
        .dump           =       cbq_dump,
        .dump_stats     =       cbq_dump_stats,
        .owner          =       THIS_MODULE,
};
static int __init cbq_module_init(void)
{
        return register_qdisc(&cbq_qdisc_ops);
}

static void __exit cbq_module_exit(void)
{
        unregister_qdisc(&cbq_qdisc_ops);
}

module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");