// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	The algorithm skeleton was taken from the NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	they should take into account that ONLY the skeleton was borrowed;
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real time classes
	have small rates. Note that the statement of [3] is
	incomplete: delay may actually be estimated even if the class
	per-round allotment is less than MTU. Namely, if the per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1, then

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have an IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.

	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me why I am wrong 8).

	--- Linux has no EOI event, so we cannot estimate true class
	idle time. The workaround is to consider the next dequeue event
	as a sign that the previous packet has finished. This is wrong
	because of internal device queueing, but on a permanently loaded
	link it is true. Moreover, combined with the clock integrator,
	this scheme looks very close to an ideal solution.  */
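/* A worked example of the delay bound above (illustrative numbers only,
 * not from the original text): take MTU = 1500 bytes, k = 2 classes with
 * r_1 = r_2 = 0.25 (so r = 0.5), and W chosen so that the per-round
 * allotment W*r_1 is 500 bytes, i.e. W = 2000 bytes. Then W*r = 1000,
 * [MTU/(W*r_1)] = [1500/500] = 3, and
 *
 *	delay_1 <= (3*1000 + 1000 + 2*1500)/B = 7000 bytes / B
 *
 * so the bound stays finite even though the 500-byte per-round allotment
 * is smaller than the MTU, which is the point made against [3] above.
 */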
struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};
#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
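/* L2T ("length to time") indexes the class's rate table to convert a
 * packet length into its transmission time, in psched ticks, at the
 * class's configured rate; e.g. L2T(cl, 1500) yields the time cl needs
 * to send a full-sized Ethernet frame. (Explanatory note, not from the
 * original source.)
 */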
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}
#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at the root
 * of CBQ, so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;

		defmap = head->defaults;
		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tcf_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 *	     apply agency specific classifier.
		 *	     Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
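/* Example of Step 1 above (illustrative values, not from the original
 * source): with sch->handle == TC_H_MAKE(0x10000, 0), i.e. "1:", a packet
 * whose skb->priority was set to 0x10002 ("1:2", f.e. by an application
 * via setsockopt(SO_PRIORITY)) is assigned directly to class 1:2 and the
 * whole filter chain is bypassed.
 */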
/*
 * A packet has just been enqueued on the empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}
/*
 * Unlink class from the active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	    struct sk_buff **to_free)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * The class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug here; apparently they forgot
		 * to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();
	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);
		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time = 0;

		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}
/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" the toplevel cutoff if the transmitting class
 * has backlog and is not regulated. This is not part of the
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding the expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is the total time between packet right edges.
		 * (last_pktlen/rate) is the "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

		/* true_avgidle := (1-W)*true_avgidle + W*idle,
		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
		 * cl->avgidle == true_avgidle/W,
		 * hence:
		 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already passed
			 * by the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
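/* Worked example of the scaled EWMA update above (illustrative numbers
 * only, not from the original source): with ewma_log = 5 (W = 1/32),
 * cl->avgidle = -64 and a measured idle of 96 ticks,
 *
 *	avgidle = -64 + 96 - (-64 >> 5) = -64 + 96 + 2 = 34
 *
 * Cross-check against the unscaled formula: true_avgidle moves from
 * -64*W = -2 to (31/32)*(-2) + (1/32)*96 = 1.0625, and indeed
 * 34*W = 34/32 = 1.0625, so the scaling cl->avgidle == true_avgidle/W
 * is preserved.
 */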
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. The overlimit
		 * action is now generated for unbounded classes
		 * only if the link is completely congested.
		 * Though it agrees with the ancestor-only paradigm,
		 * it looks very stupid. In particular, it means that
		 * this chunk of code will either never be called
		 * or result in strong amplification of burstiness.
		 * Dangerous, silly, and yet no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from the active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;
	return NULL;
}
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);

		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 * 1. The scheduler is empty.
		 * 2. The toplevel cutoff inhibited borrowing.
		 * 3. The root class is overlimit.
		 *
		 * Reset conditions 2 and 3 and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes; that is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in the scheduler, or nobody wants to give them to us :-(
	 * Sigh... start the watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflow!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
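/* Worked example of the normalization above (illustrative numbers only,
 * not from the original source): two classes at the same prio with
 * weights 1 and 3 and allot 1500 give q->quanta[prio] = 4 and
 * q->nclasses[prio] = 2, so the quanta become (1*1500*2)/4 = 750 and
 * (3*1500*2)/4 = 2250 bytes per round. The mean quantum stays equal to
 * the mean allot while the 1:3 weight ratio is preserved.
 */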
static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct cbq_class *c;

			hlist_for_each_entry(c, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}
static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}
static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}
static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	q->delay_timer.function = cbq_undelay;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
					  extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
		return -EINVAL;
	}

	r = nla_data(tb[TCA_CBQ_RATE]);

	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
	if (!q->link.R_tab)
		return -EINVAL;

	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
	if (err)
		goto put_rtab;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_block;

	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle, NULL);
	if (!q->link.q)
		q->link.q = &noop_qdisc;
	else
		qdisc_hash_add(q->link.q, true);

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_block:
	tcf_block_put(q->link.block);

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	__u32 qlen;

	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;
	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, extack);
		if (new == NULL)
			return -ENOBUFS;
	}

	*old = qdisc_replace(sch, new, &cl->q);
	return 0;
}
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cbq_deactivate_class(cl);
}

static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	return (unsigned long)cbq_class_lookup(q, classid);
}
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_block_put(cl->block);
	qdisc_put(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}
static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg, struct netlink_ext_ack *extack)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
		return -EOPNOTSUPP;
	}

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid) {
				NL_SET_ERR_MSG(extack, "Invalid parent id");
				return -EINVAL;
			}
			if (!cl->tparent && parentid != TC_H_ROOT) {
				NL_SET_ERR_MSG(extack, "Parent must be root");
				return -EINVAL;
			}
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB], extack);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err) {
				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
				qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
		NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
		return -EINVAL;
	}

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
			      extack);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid)) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			goto failure;
		}
	} else {
		int i;

		classid = TC_H_MAKE(sch->handle, 0x8000);
		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000) {
			NL_SET_ERR_MSG(extack, "Unable to generate classid");
			goto failure;
		}
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (!parent) {
			NL_SET_ERR_MSG(extack, "Failed to find parentid");
			goto failure;
		}
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		goto failure;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
			tcf_block_put(cl->block);
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				  NULL);
	if (!cl->q)
		cl->q = &noop_qdisc;
	else
		qdisc_hash_add(cl->q, true);

	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->q);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	cbq_destroy_class(sch, cl);
	return 0;
}
static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
				       struct netlink_ext_ack *extack)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return cl->block;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.find		=	cbq_find,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_block	=	cbq_tcf_block,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}

static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}

module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");
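/* Example configuration (hypothetical device name and rates, shown for
 * illustration only; see tc-cbq(8) for the authoritative syntax):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 30Mbit allot 1500 \
 *		avpkt 1000 prio 5 bounded
 *
 * The first command installs the root CBQ discipline (q->link above);
 * the second creates a bounded 30Mbit class that cbq_change_class()
 * links under it.
 */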