/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when the
   device is ready to send something) in the order and at the times
   determined by the algorithm hidden inside it.

   Qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a
   form more intelligible to the kernel, to make some sanity checks
   and to perform the part of the work common to all qdiscs, and to
   provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.
   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue; nevertheless, q->q.qlen must be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If the packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.

   ---peek

   like dequeue, but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
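/* A minimal sketch (illustrative only, hence the #if 0) of how a module
 * would fill in the ops described above and register them.  The "example_*"
 * names are hypothetical; a trivial FIFO built from the sch_generic helpers
 * is assumed.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */
	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send now"; emptiness is q->q.qlen == 0 */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,	/* dequeue without removal */
	.owner		= THIS_MODULE,
};

/* Module init/exit would then call register_qdisc(&example_qdisc_ops) and
 * unregister_qdisc(&example_qdisc_ops) respectively.
 */
#endif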
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdiscs attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	return cops->leaf(p, cl);
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value.  The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell.  If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below it, and comparing the two.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
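/* Worked example (hypothetical numbers): with mpu = 0 and cell_log = 3,
 * low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, so cell_low = 0
 * and cell_high = (48 >> 3) - 1 = 5.  An ATM-aligned table repeats the same
 * cost across one 48-byte cell, so rtab[0] == rtab[5] and TC_LINKLAYER_ATM
 * is reported; an Ethernet table normally differs within that range.
 */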
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree_rcu(tab, rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
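/* Worked example (hypothetical size table): with overhead = 24,
 * cell_align = 0, cell_log = 6 and size_log = 0, a 1000-byte skb gives
 * slot = (1000 + 24) >> 6 = 16, so pkt_len becomes stab->data[16] -
 * whatever "size on the wire" the table's author stored for that bucket.
 */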
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
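/* A minimal sketch (illustrative only, hence the #if 0) of the intended
 * watchdog usage pattern: arm it from dequeue when the head packet is not
 * yet eligible, cancel it in reset/destroy.  "example_sched_data" and
 * "next_tx_time" are hypothetical pacing state, not part of this file.
 */
#if 0
struct example_sched_data {
	struct qdisc_watchdog	watchdog;	/* initialized in ->init */
	u64			next_tx_time;	/* ns, hypothetical */
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (!sch->q.qlen)
		return NULL;
	if (q->next_tx_time > ktime_get_ns()) {
		/* Not eligible yet: arm the watchdog so __netif_schedule()
		 * re-runs us at the right time.  Returning NULL here does
		 * not mean "empty" (q->q.qlen != 0).
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_tx_time);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif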
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
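/* Concretely: the candidates are tried as 8001:0000, 8002:0000, and so on
 * (ffff:0000 itself is skipped, wrapping back to 8000:0000), and 0 is
 * returned once all 0x8000 candidates are found to be in use.
 */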
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);

	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
			      void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);

void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);

static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
{
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),
	};

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_put(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_put(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		unsigned long cl;
		int err;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) &&
		    parent && !(parent->flags & TCQ_F_NOLOCK))
			new->flags &= ~TCQ_F_NOLOCK;

		if (!cops || !cops->graft)
			return -EOPNOTSUPP;

		cl = cops->find(parent, classid);
		if (!cl) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			return -ENOENT;
		}

		err = cops->graft(parent, cl, new, &old, extack);
		if (err)
			return err;
		notify_and_destroy(net, skb, n, classid, old, new);
	}
	return 0;
}
static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0) {
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
				err = -ENOSPC;
				goto err_out3;
			}
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to stay backward compatible with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require an ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return err;
}
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to say
				 *   that the qdisc with handle tcm_handle is not
				 *   expected to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the
				 *   existing one.
				 */
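				/* In tabular form, the same decision:
				 *   CREATE+REPLACE+EXCL           -> create/graft
				 *   CREATE+REPLACE, KIND differs  -> create/graft
				 *   anything else                 -> change "q" in place
				 */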
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
			  rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	unsigned long cl;
	u32 classid;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl);
		sch_tree_unlock(q);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return;
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		struct tcf_proto *tp;

		for (tp = tcf_get_next_proto(chain, NULL, true);
		     tp; tp = tcf_get_next_proto(chain, tp, true)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w, true);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
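	/* For example (hypothetical request): "tc class add dev eth0 parent 1:
	 * classid 1:10" arrives with tcm_parent = 1:0 and tcm_handle = 1:10;
	 * the steps below resolve qid to 1:0 and create/change class 1:10.
	 */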
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class from its filters (bind to classid 0) */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class, need to do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);