// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

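/* Filters that are being destroyed asynchronously are kept in a per-block
 * hashtable keyed by (chain index, prio, protocol), so that a concurrent
 * insert of an identical triple can detect the pending destruction and back
 * off with -EAGAIN. The helpers below add and remove entries under
 * block->proto_destroy_lock.
 */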
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

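/* Queue work on the tc filter workqueue after an RCU grace period has
 * elapsed. Used by classifiers to defer freeing of filter state until
 * concurrent RCU readers are done with it.
 */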
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

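/* Returns true if the classifier type is known to run without the rtnl lock
 * (TCF_PROTO_OPS_DOIT_UNLOCKED). An empty kind or a failed lookup is reported
 * as "locked" so that callers conservatively take the rtnl lock.
 */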
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

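/* Check whether the filter is empty and therefore safe to delete. Classifier
 * types that implement delete_empty() decide for themselves; for the rest we
 * assume empty and mark the filter as deleting right away.
 */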
static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user ought
	 * not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

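/* Drop a reference to a chain. The last dropped non-action reference sends a
 * RTM_DELCHAIN notification; once the overall refcount hits zero, the chain
 * is detached from its block, its template is deleted and the chain is
 * destroyed.
 */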
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

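/* Flush all filters on a chain: publish them as "being destroyed" first, then
 * unlink the whole list under filter_chain_lock, and finally drop the filter
 * references outside the lock.
 */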
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;

	tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	up_write(&block->cb_lock);
	rtnl_lock();
	tcf_block_unbind(block, &bo);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	tcf_block_offload_init(&bo, dev, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc)
		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	else
		err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
						  &bo, tc_block_indr_cleanup);

	if (err < 0) {
		if (err != -EOPNOTSUPP)
			NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
		return err;
	}

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

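/* Register a callback that is invoked whenever the head filter of chain 0
 * changes. If chain 0 already exists, the current head is replayed to the
 * new callback before it is added to the list.
 */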
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference to block by
		 * calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

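/* Default chain0 head change callback used by tcf_block_get(): it simply
 * mirrors the new head filter into the caller-provided filter chain pointer.
 */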
static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

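/* Attach the callbacks collected in bo to the block, replaying all existing
 * filters to each new callback. On failure, unwind the callbacks that were
 * already bound.
 */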
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);

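/* Ingress-specific entry point. With CONFIG_NET_TC_SKB_EXT, classification
 * can resume at the chain recorded in the skb's TC_SKB_EXT extension (left
 * behind when a previous pass ended mid-walk, e.g. on a hardware miss), and
 * a fresh extension is added when this pass in turn misses on some chain.
 */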
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * recently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

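/* Fill a netlink message with a single filter. When q is set, the message is
 * addressed via ifindex and parent; for shared blocks the magic block ifindex
 * and the block index are used instead.
 */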
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

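/* RTM_NEWTFILTER: create or update a filter. Runs without the rtnl lock when
 * both the qdisc and the classifier type support it; on -EAGAIN the whole
 * request is replayed with rtnl_held forced to true.
 */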
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

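/* RTM_DELTFILTER: delete a single filter, or flush a whole chain when no
 * priority is given.
 */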
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);
		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}

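/* RTM_GETTFILTER: look up a single filter and unicast it to the requester. */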
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
	bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, a->terse_dump, true);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};

/* called with RTNL */
2534 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2536 struct tcf_chain *chain, *chain_prev;
2537 struct net *net = sock_net(skb->sk);
2538 struct nlattr *tca[TCA_MAX + 1];
2539 struct Qdisc *q = NULL;
2540 struct tcf_block *block;
2541 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2542 bool terse_dump = false;
2548 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2551 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2552 tcf_tfilter_dump_policy, cb->extack);
2556 if (tca[TCA_DUMP_FLAGS]) {
2557 struct nla_bitfield32 flags =
2558 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2560 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2563 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2564 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2567 /* If we work with block index, q is NULL and parent value
2568 * will never be used in the following code. The check
2569 * in tcf_fill_node prevents it. However, compiler does not
2570 * see that far, so set parent to zero to silence the warning
2571 * about parent being uninitialized.
2575 const struct Qdisc_class_ops *cops;
2576 struct net_device *dev;
2577 unsigned long cl = 0;
2579 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2583 parent = tcm->tcm_parent;
2587 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2590 cops = q->ops->cl_ops;
2593 if (!cops->tcf_block)
2595 if (TC_H_MIN(tcm->tcm_parent)) {
2596 cl = cops->find(q, tcm->tcm_parent);
2600 block = cops->tcf_block(q, cl, NULL);
2603 parent = block->classid;
2604 if (tcf_block_shared(block))
2608 index_start = cb->args[0];
2611 for (chain = __tcf_get_next_chain(block, NULL);
2614 chain = __tcf_get_next_chain(block, chain),
2615 tcf_chain_put(chain_prev)) {
2616 if (tca[TCA_CHAIN] &&
2617 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2619 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2620 index_start, &index, terse_dump)) {
2621 tcf_chain_put(chain);
2627 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2628 tcf_block_refcnt_put(block, true);
2629 cb->args[0] = index;
2632 /* If we did no progress, the error (EMSGSIZE) is real */
2633 if (skb->len == 0 && err)
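
/* Example (illustrative sketch): to dump filters on a shared block
 * instead of a qdisc, userspace sets the magic ifindex handled above and
 * passes the block index, roughly what "tc filter show block 22" sends:
 *
 *	struct tcmsg tcm = {};
 *
 *	tcm.tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
 *	tcm.tcm_block_index = 22;
 */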
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}
static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no work to do for us. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
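
/* Example (hypothetical classifier sketch): a classifier opts in to chain
 * templates by implementing all three template callbacks checked by
 * tc_chain_tmplt_add() above; cls_flower is the in-tree user:
 *
 *	static struct tcf_proto_ops my_cls_ops = {
 *		.kind		= "my_cls",
 *		.tmplt_create	= my_cls_tmplt_create,
 *		.tmplt_destroy	= my_cls_tmplt_destroy,
 *		.tmplt_dump	= my_cls_tmplt_dump,
 *		...
 *	};
 */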
/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -ENOENT;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear at
		 * the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
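
/* Example (illustrative sketch, kernel-side helpers shown for brevity):
 * an explicit chain create such as "tc chain add dev eth0 ingress chain 100"
 * arrives here as roughly:
 *
 *	nlh->nlmsg_type = RTM_NEWCHAIN;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
 *	tcm->tcm_ifindex = ifindex;
 *	tcm->tcm_parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
 *	nla_put_u32(skb, TCA_CHAIN, 100);
 *
 * and the matching delete flushes the chain's filters before dropping the
 * explicit reference taken at creation.
 */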
/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
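
/* Example: as with all dumps in this file, resumption state lives in
 * cb->args[]. Sketch of the convention used above:
 *
 *	index_start = cb->args[0];	// entries already emitted
 *	...fill skb until tc_chain_fill_node() returns <= 0...
 *	cb->args[0] = index;		// resume point for the next recvmsg()
 */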
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
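
/* Example (sketch modeled on cls_flower's fl_set_parms()): a classifier's
 * ->change() validates actions into the new filter's exts before it is
 * published, where "tb" is the parsed attribute table and "f" the
 * classifier-private filter:
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *				rtnl_held, extack);
 *	if (err < 0)
 *		return err;
 */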
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/* Again for backward-compatible mode - we want to work
		 * with both the old and new modes of passing tc data,
		 * even if iproute2 is newer. - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);
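
/* Example: for the new (non-TCA_OLD_COMPAT) mode the dump above emits one
 * nest keyed by the classifier's action attribute, e.g. TCA_FLOWER_ACT,
 * with tcf_action_dump() adding one numbered sub-nest per action. The
 * per-classifier attribute ids come from the classifier's tcf_exts_init()
 * call, e.g.:
 *
 *	tcf_exts_init(&f->exts, net, TCA_FLOWER_ACT, 0);
 */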
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}
static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}
static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
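
/* Example (hypothetical driver callback): each block_cb->cb() invoked by
 * __tc_setup_cb_call() has the flow_setup_cb_t signature:
 *
 *	static int my_drv_setup_tc_block_cb(enum tc_setup_type type,
 *					    void *type_data, void *cb_priv)
 *	{
 *		struct my_drv_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return my_drv_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * A zero return is counted into ok_count; errors either abort the walk
 * (err_stop) or are skipped.
 */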
/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);
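
/* Example (sketch modeled on cls_flower's hw replace path): a classifier
 * typically offloads a new filter like this, with f->flags and
 * f->in_hw_count maintained by tc_cls_offload_cnt_update() above:
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;	// no callback accepted a skip_sw filter
 */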
/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);
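
/* Example (hypothetical ops sketch): the hw_add()/hw_del() hooks used
 * above let a classifier track which filters are in hardware, e.g. for
 * later reoffload; cls_flower implements them as fl_hw_add()/fl_hw_del(),
 * which link the filter on a hw_filters list under a spinlock:
 *
 *	static void my_cls_hw_add(struct tcf_proto *tp, void *type_data)
 *	{
 *		struct flow_cls_offload *cls = type_data;
 *
 *		// record the offloaded entry keyed by cls->cookie
 *	}
 */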
/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
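
/* Example (sketch modeled on cls_flower's ->reoffload()): when a device
 * joins or leaves a shared block, the classifier replays each hw filter
 * through the new callback so the counters above stay consistent:
 *
 *	list_for_each_entry(f, &head->hw_filters, hw_list) {
 *		...fill a flow_cls_offload for f...
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_flower,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */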
static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;

	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}
void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_flow_action);
static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}
static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}
static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
		return FLOW_ACTION_HW_STATS_DONT_CARE;
	else if (!hw_stats)
		return FLOW_ACTION_HW_STATS_DISABLED;

	return hw_stats;
}
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = tc_act_hw_stats(act->hw_stats);

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_gate(act)) {
			entry->id = FLOW_ACTION_GATE;
			entry->gate.index = tcf_gate_index(act);
			entry->gate.prio = tcf_gate_prio(act);
			entry->gate.basetime = tcf_gate_basetime(act);
			entry->gate.cycletime = tcf_gate_cycletime(act);
			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
			entry->gate.num_entries = tcf_gate_num_entries(act);
			err = tcf_gate_get_entries(entry, act);
			if (err)
				goto err_out_locked;
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);
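
/* Example (hypothetical driver sketch): offload code consumes the
 * translated entries by id, usually via flow_action_for_each():
 *
 *	struct flow_action_entry *entry;
 *	int i;
 *
 *	flow_action_for_each(i, entry, &rule->action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			// program a drop rule
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			// program forwarding to entry->dev
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */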
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
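
/* Example: tc_setup_flow_action() assumes flow_action->entries has room
 * for every translated entry, including one per pedit key, so callers
 * size the rule with tcf_exts_num_actions(); sketch of the cls_flower
 * pattern:
 *
 *	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!cls_flower.rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
 */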
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);