// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
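
/* Usage sketch (illustrative, not part of this file): a classifier
 * module registers its ops on load and unregisters them on unload.
 * The "foo" kind string and callbacks below are assumptions made up
 * for the example.
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *	module_init(foo_module_init);
 *	module_exit(foo_module_exit);
 */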
static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
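
/* Usage sketch (illustrative): a classifier defers freeing of a filter
 * until both an RCU grace period has elapsed and the work item has run
 * on tc_filter_wq. The "foo" struct and callback names are assumptions
 * made up for the example.
 *
 *	struct foo_filter {
 *		struct tcf_result res;
 *		struct rcu_work rwork;
 *	};
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *		rtnl_lock();
 *		__foo_delete_filter(f);
 *		rtnl_unlock();
 *	}
 *
 *	...
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 */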
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);
	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, extack);
}
static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker, rtnl_held);
		return !walker.nonempty;
	}
	return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}
#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}
/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
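
/* Usage sketch (illustrative, not part of this file): an action that
 * jumps to a chain (e.g. "goto chain") pins the target chain with an
 * action reference so it stays alive for as long as the action points
 * to it, and drops the reference when the action is torn down:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);
 */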
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}
static struct rhashtable indr_setup_block_ht;

struct tc_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
	struct tcf_block *block;
};

struct tc_indr_block_cb {
	struct list_head list;
	struct tc_indr_block_dev *indr_dev;
	tc_indr_block_bind_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
};

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
	.key_len	= sizeof(struct net_device *),
};

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      tc_indr_setup_block_ht_params);
}

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (indr_dev)
		goto inc_ref;

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->dev = dev;
	indr_dev->block = tc_dev_ingress_block(dev);
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   tc_indr_setup_block_ht_params)) {
		kfree(indr_dev);
		return NULL;
	}

inc_ref:
	indr_dev->refcnt++;
	return indr_dev;
}

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
	if (--indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       tc_indr_setup_block_ht_params);
	kfree(indr_dev);
}

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
			tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;
	return NULL;
}

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (indr_block_cb)
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
				  struct tc_indr_block_cb *indr_block_cb,
				  enum flow_block_command command)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.net		= dev_net(indr_dev->dev),
		.block_shared	= tcf_block_non_null_shared(indr_dev->block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!indr_dev->block)
		return;

	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
			  &bo);
	tcf_block_setup(indr_dev->block, &bo);
}
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	int err;

	indr_dev = tc_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);
	if (err)
		goto err_dev_put;

	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND);
	return 0;

err_dev_put:
	tc_indr_block_dev_put(indr_dev);
	return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
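
/* Usage sketch (illustrative): a driver that offloads filters bound to
 * a device it does not own (e.g. a tunnel netdev whose traffic it can
 * decapsulate) registers an indirect callback keyed by that device.
 * The "drv" names are assumptions made up for the example.
 *
 *	err = tc_indr_block_cb_register(netdev, drv_priv,
 *					drv_indr_setup_tc_cb, drv_priv);
 *	...
 *	tc_indr_block_cb_unregister(netdev, drv_indr_setup_tc_cb, drv_priv);
 */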
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
	if (!indr_block_cb)
		return;

	/* Send unbind message if required to free any block cbs. */
	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND);
	tc_indr_block_cb_del(indr_block_cb);
	tc_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	rtnl_lock();
	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);

static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev)
		return;

	indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
				  &bo);

	tcf_block_setup(block, &bo);
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		return -EOPNOTSUPP;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		return err;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
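
/* Usage sketch (illustrative): walking every classifier on a block with
 * the two iterators above. Each call takes the next reference before the
 * previous one is dropped, so the caller only ever sees referenced
 * objects; inspect() stands in for caller code.
 *
 *	struct tcf_chain *chain;
 *	struct tcf_proto *tp;
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL, true); tp;
 *		     tp = tcf_get_next_proto(chain, tp, true))
 *			inspect(tp);
 *	}
 */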
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Lookup Qdisc and increments its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}
/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
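
/* Usage sketch (illustrative): a classful qdisc typically obtains its
 * block in ->init() and releases it in ->destroy(). The "foo" private
 * structure is a hypothetical example, not a real qdisc.
 *
 *	struct foo_sched_data {
 *		struct tcf_proto __rcu *filter_list;
 *		struct tcf_block *block;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */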
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;

		i++;
	}
	list_splice(&bo->cb_list, &block->cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
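
/* Usage sketch (illustrative): a qdisc classifies a packet on its
 * enqueue path; TC_ACT_UNSPEC means no filter matched and the caller
 * falls back to its default class. The "q->filter_list" field is an
 * assumption standing in for the qdisc's own filter list.
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *		... drop or consume the skb ...
 *		break;
 *	default:
 *		... map res.classid to a class ...
 *		break;
 *	}
 */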
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
					  protocol, prio, chain, rtnl_held,
					  extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
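
/* Message layout note: a chain message built above is a tcmsg header that
 * identifies the block (either via ifindex/parent, or via
 * TCM_IFINDEX_MAGIC_BLOCK plus tcm_block_index for shared blocks),
 * followed by a TCA_CHAIN attribute and, when a template is set, TCA_KIND
 * plus the template's own dump attributes.
 */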

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}
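
/* Illustration (assumed iproute2 usage, not part of this file): a chain
 * template is installed by naming a classifier on the chain itself, e.g.
 *
 *	tc chain add dev eth0 ingress chain 1 protocol ip \
 *		flower dst_ip 192.0.2.0/24
 *
 * which reaches tc_chain_tmplt_add() with TCA_KIND = "flower" and lets
 * flower's tmplt_create() constrain the match keys that later filters on
 * chain 1 may use.
 */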

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
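
/* Illustration (assumed iproute2 usage, not part of this file):
 *
 *	tc chain add dev eth0 ingress chain 100  -> RTM_NEWCHAIN + NLM_F_CREATE
 *	tc chain del dev eth0 ingress chain 100  -> RTM_DELCHAIN
 *	tc chain get dev eth0 ingress chain 100  -> RTM_GETCHAIN
 *
 * all of which are handled by tc_ctl_chain() below.
 */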

/* Add/delete/get a filter chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;

	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
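
/* Sketch of the caller side (hypothetical classifier, for illustration
 * only): a classifier's ->change() typically validates its actions with
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr,
 *				true, extack);
 *
 * where 'prog' stands for that classifier's private filter struct. On
 * success the parsed actions are owned by prog->exts and are later
 * released through tcf_exts_destroy().
 */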

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);
	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
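
/* Note on return semantics: a negative return from tc_setup_cb_call() is
 * an error, while a return >= 0 is the number of block callbacks that
 * accepted the command. With err_stop the first driver failure aborts the
 * walk; otherwise failures are skipped and only successes are counted.
 */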

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	const struct tc_action *act;
	int i, j, k;

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			entry->dev = tcf_mirred_dev(act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				goto err_out;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			entry->tunnel = tcf_tunnel_info(act);
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					goto err_out;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.psample_group =
				tcf_sample_psample_group(act);
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
		} else {
			goto err_out;
		}

		if (!is_tcf_pedit(act))
			j++;
	}
	return 0;

err_out:
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tc_setup_flow_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
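
/* Sketch of intended use (illustrative, based on the two helpers above):
 * an offload path sizes the flow_action table with tcf_exts_num_actions()
 * before translating, e.g.
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, exts);
 *
 * so that one pedit action with N keys expands into N mangle entries.
 */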

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	err = rhashtable_init(&indr_setup_block_ht,
			      &tc_indr_setup_block_ht_params);
	if (err)
		goto err_rhash_setup_block_ht;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_rhash_setup_block_ht:
	unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);