// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* Class-Based Queueing (CBQ) algorithm.
   =======================================

   Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

	    [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

	    [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

	    [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

   -----------------------------------------------------------------------

   The algorithm skeleton was taken from the NS simulator cbq.cc.
   Anyone who wants to check this code against the LBL version
   should take into account that ONLY the skeleton was borrowed;
   the implementation is different. Particularly:

   --- The WRR algorithm is different. Our version looks more
   reasonable (I hope) and works when quanta are allowed to be
   less than MTU, which is always the case when real-time classes
   have small rates. Note that the statement of [3] is
   incomplete: delay may actually be estimated even if class
   per-round allotment is less than MTU. Namely, if per-round
   allotment is W*r_i, and r_1+...+r_k = r < 1,

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

   In the worst case we have an IntServ estimate with D = W*r+k*MTU
   and C = MTU*r. The proof (if correct at all) is trivial.

   --- It seems that cbq-2.0 is not very accurate. At least, I cannot
   interpret some places, which look like wrong translations
   from NS. Anyone is advised to find these differences
   and explain to me why I am wrong 8).

   --- Linux has no EOI event, so we cannot estimate true class
   idle time. The workaround is to consider the next dequeue event
   as a sign that the previous packet has finished. This is wrong because of
   internal device queueing, but on a permanently loaded link it is true.
   Moreover, combined with the clock integrator, this scheme looks
   very close to an ideal solution.  */
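/*
 * Illustrative sketch (not part of the original file): the worst-case delay
 * bound quoted above,
 *	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B,
 * written out as arithmetic, reading [x] as a ceiling. W*r_i is the
 * per-round allotment of class i in bytes, W*r the total per-round
 * allotment, k the number of classes and B the link rate in bytes/sec.
 * The helper name and parameters are hypothetical, not kernel API.
 */
static inline unsigned long cbq_example_delay_bound(unsigned long mtu,
						    unsigned long w_ri,
						    unsigned long w_r,
						    unsigned int k,
						    unsigned long bytes_per_sec)
{
	/* rounds needed before class i has accumulated one full MTU */
	unsigned long rounds = DIV_ROUND_UP(mtu, w_ri);

	/* bytes that may be served ahead of it, divided by the link rate */
	return (rounds * w_r + w_r + (unsigned long)k * mtu) / bytes_per_sec;
}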
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */

/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};
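/*
 * Illustrative sketch (not from the original file): tparent/children/sibling
 * form the class tree, with the direct children of each node linked in a
 * circular "sibling" ring (see cbq_link_class()/cbq_adjust_levels() below).
 * A hypothetical walk over the direct children of a class looks like this:
 */
static inline void cbq_example_for_each_child(struct cbq_class *parent,
					      void (*fn)(struct cbq_class *))
{
	struct cbq_class *cl = parent->children;

	if (!cl)
		return;
	do {
		fn(cl);		/* visit one direct child */
	} while ((cl = cl->sibling) != parent->children);
}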
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;		/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */

	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit yet */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;

		defmap = head->defaults;
		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tcf_classify(skb, NULL, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
/*
 * A packet has just been enqueued on the empty class.
 * cbq_activate_class adds it to the tail of active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int ret;
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this place, apparently
		 * they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}
		q->wd_expires = base_delay;
	}
}

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		_bstats_update(&cl->bstats, len, 1);

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */
		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */
			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */
			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */
			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
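/*
 * Illustrative sketch (not from the original file) of the scaled EWMA used
 * above: true_avgidle = (1-W)*true_avgidle + W*idle with W = 2^-ewma_log,
 * stored as avgidle = true_avgidle/W so the update needs only a shift.
 * The helper name is hypothetical.
 */
static inline long cbq_example_ewma_idle(long avgidle, long idle,
					 unsigned char ewma_log)
{
	/* avgidle += idle - W*avgidle, with W*avgidle == avgidle >> ewma_log */
	return avgidle + idle - (avgidle >> ewma_log);
}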
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);

		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
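/*
 * Illustrative note (not from the original file): ffz(~mask) is the index of
 * the lowest set bit of mask, so the loop above always serves the
 * highest-priority (lowest-numbered) band that still has backlog. A
 * hypothetical standalone equivalent using a compiler builtin:
 */
static inline int cbq_example_first_active_prio(unsigned int activemask)
{
	/* e.g. activemask == 0x5 (bands 0 and 2) -> band 0 is served first */
	return activemask ? __builtin_ctz(activemask) : -1;
}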
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better but slower, because it requires
		 * two passes; this is unavoidable with top-level sharing.
		 */
		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */
	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
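/*
 * Worked example (illustrative only, not from the original file) of the
 * normalization above: with two classes in a band, allot 1514 and weights
 * 1 and 3, q->quanta[prio] = 1 + 3 = 4 and q->nclasses[prio] = 2, so the
 * classes get quanta of 1*1514*2/4 = 757 and 3*1514*2/4 = 2271 bytes per
 * round, i.e. the per-band total of nclasses*allot is split by weight.
 */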
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
}
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
			 struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
					  cbq_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_WRROPT]) {
		const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);

		if (wrr->priority > TC_CBQ_MAXPRIO) {
			NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
			err = -EINVAL;
		}
	}
	return err;
}
static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);

	err = cbq_opt_parse(tb, opt, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
		return -EINVAL;
	}

	r = nla_data(tb[TCA_CBQ_RATE]);

	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
	if (!q->link.R_tab)
		return -EINVAL;

	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
	if (err)
		goto put_rtab;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_block;

	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle, NULL);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_block:
	tcf_block_put(q->link.block);

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
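/*
 * Example configuration (illustrative only; see tc-cbq(8) for the
 * authoritative syntax and the full parameter list):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 6Mbit allot 1514 prio 5 \
 *		avpkt 1000 bounded
 *
 * The root qdisc corresponds to q->link set up above; each "tc class add"
 * ends up in cbq_change_class() below.
 */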
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	__u32 qlen;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;
	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, extack);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cbq_deactivate_class(cl);
}

static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	return (unsigned long)cbq_class_lookup(q, classid);
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_block_put(cl->block);
	qdisc_put(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leaves which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	err = cbq_opt_parse(tb, opt, extack);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
		return -EOPNOTSUPP;
	}

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid) {
				NL_SET_ERR_MSG(extack, "Invalid parent id");
				return -EINVAL;
			}
			if (!cl->tparent && parentid != TC_H_ROOT) {
				NL_SET_ERR_MSG(extack, "Parent must be root");
				return -EINVAL;
			}
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB], extack);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est, NULL,
						    true, tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
		NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
		return -EINVAL;
	}

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
			      extack);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid)) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			goto failure;
		}
	} else {
		int i;

		classid = TC_H_MAKE(sch->handle, 0x8000);
		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000) {
			NL_SET_ERR_MSG(extack, "Unable to generate classid");
			goto failure;
		}
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (!parent) {
			NL_SET_ERR_MSG(extack, "Failed to find parentid");
			goto failure;
		}
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	gnet_stats_basic_sync_init(&cl->bstats);
	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		goto failure;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL, true, tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
			tcf_block_put(cl->block);
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				  NULL);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg,
		      struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->q);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	cbq_destroy_class(sch, cl);
	return 0;
}

static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return cl->block;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.find		=	cbq_find,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_block	=	cbq_tcf_block,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");