// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/flow_offload.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
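
/* Example (informational sketch, not used in this file): callers pass the
 * device transmit path as the xmit callback so sch_frag can fragment
 * oversized frames first when the static key is enabled; act_mirred does
 * roughly:
 *
 *	err = tcf_dev_queue_xmit(skb2, dev_queue_xmit);
 */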

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);
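
/* Example encoding (derived from the TC_ACT_EXT_* macros in pkt_cls.h): a
 * "goto chain 7" control action arrives as the single integer
 * (TC_ACT_GOTO_CHAIN | 7); TC_ACT_EXT_OPCODE() extracts the opcode from the
 * high bits and (action & TC_ACT_EXT_VAL_MASK) recovers the chain index 7.
 */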

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void offload_action_hw_count_set(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = hw_count;
}

static void offload_action_hw_count_inc(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count += hw_count;
}

static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = act->in_hw_count > hw_count ?
			   act->in_hw_count - hw_count : 0;
}

static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
	if (is_tcf_pedit(act))
		return tcf_pedit_nkeys(act);
	else
		return 1;
}

static bool tc_act_skip_hw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
}

static bool tc_act_skip_sw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}

static bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
}
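
/* Truth table for the check above: the XOR is non-zero (valid) unless both
 * flags are set at once:
 *
 *	neither flag set	-> valid (use both sw and hw datapaths)
 *	SKIP_HW only		-> valid (software only)
 *	SKIP_SW only		-> valid (hardware only)
 *	SKIP_HW | SKIP_SW	-> invalid (nothing left to run the action)
 */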

static int offload_action_init(struct flow_offload_action *fl_action,
			       struct tc_action *act,
			       enum offload_act_command cmd,
			       struct netlink_ext_ack *extack)
{
	int err;

	fl_action->extack = extack;
	fl_action->command = cmd;
	fl_action->index = act->tcfa_index;

	if (act->ops->offload_act_setup) {
		spin_lock_bh(&act->tcfa_lock);
		err = act->ops->offload_act_setup(act, fl_action, NULL,
						  false, extack);
		spin_unlock_bh(&act->tcfa_lock);
		return err;
	}

	return -EOPNOTSUPP;
}

static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
				     u32 *hw_count)
{
	int err;

	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
					  fl_act, NULL, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err;

	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						 cb, cb_priv) :
		    tcf_action_offload_cmd_ex(fl_act, hw_count);
}

static int tcf_action_offload_add_ex(struct tc_action *action,
				     struct netlink_ext_ack *extack,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct flow_offload_action *fl_action;
	u32 in_hw_count = 0;
	int num, err = 0;

	if (tc_act_skip_hw(action->tcfa_flags))
		return 0;

	num = tcf_offload_act_num_actions_single(action);
	fl_action = offload_action_alloc(num);
	if (!fl_action)
		return -ENOMEM;

	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
	if (err)
		goto fl_err;

	err = tc_setup_action(&fl_action->action, actions, extack);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto fl_err;
	}

	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
	if (!err)
		cb ? offload_action_hw_count_inc(action, in_hw_count) :
		     offload_action_hw_count_set(action, in_hw_count);

	if (skip_sw && !tc_act_in_hw(action))
		err = -EINVAL;

	tc_cleanup_offload_action(&fl_action->action);

fl_err:
	kfree(fl_action);

	return err;
}

/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
				  struct netlink_ext_ack *extack)
{
	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}

int tcf_action_update_hw_stats(struct tc_action *action)
{
	struct flow_offload_action fl_act = {};
	int err;

	if (!tc_act_in_hw(action))
		return -EOPNOTSUPP;

	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
	if (!err) {
		preempt_disable();
		tcf_action_stats_update(action, fl_act.stats.bytes,
					fl_act.stats.pkts,
					fl_act.stats.drops,
					fl_act.stats.lastused,
					true);
		preempt_enable();

		action->used_hw_stats = fl_act.stats.used_hw_stats;
		action->used_hw_stats_valid = true;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);

static int tcf_action_offload_del_ex(struct tc_action *action,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	struct flow_offload_action fl_act = {};
	u32 in_hw_count = 0;
	int err = 0;

	if (!tc_act_in_hw(action))
		return 0;

	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
	if (err < 0)
		return err;

	if (!cb && action->in_hw_count != in_hw_count)
		return -EINVAL;

	/* do not need to update hw state when deleting action */
	if (cb && in_hw_count)
		offload_action_hw_count_dec(action, in_hw_count);

	return 0;
}

static int tcf_action_offload_del(struct tc_action *action)
{
	return tcf_action_offload_del_ex(action, NULL, NULL);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	tcf_action_offload_del(p);
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and a zero bind count can
	 * exist is when it was also created via the act API (unbinding the
	 * last classifier destroys the action only if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the act
	 * API while a classifier concurrently binds to an action with the
	 * same id. This results either in creation of a new action (same
	 * behavior as before), or in reuse of the existing action if the
	 * concurrent process increments the reference count before the action
	 * is deleted. Both scenarios result in consistent behavior.
	 */
	if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (__tcf_action_put(p, bind))
		ret = ACT_P_DELETED;

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN                     /* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	ret = 0;
	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			break;
		n_i++;
	}
	mutex_unlock(&idrinfo->lock);
	if (ret < 0) {
		if (n_i)
			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		else
			goto nla_put_failure;
	}

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
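
/* Example (sketch): an action module typically implements its ->walk op as a
 * thin wrapper around tcf_generic_walker(), shown here for a hypothetical
 * act_foo with pernet id foo_net_id:
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */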

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	gnet_stats_basic_sync_init(&p->tcfa_bstats);
	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, false, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If it is found,
 * increment its reference and bind counters and return 1. Otherwise insert
 * a temporary error pointer (to prevent concurrent users from inserting
 * actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
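
/* Example (sketch): an action ->init() implementation pairs this with
 * tcf_idr_create()/tcf_idr_cleanup(); the details vary per action, but for a
 * hypothetical act_foo the flow looks roughly like:
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (!err) {			// index was reserved for us
 *		ret = tcf_idr_create(tn, index, est, a, &act_foo_ops,
 *				     bind, false, flags);
 *		if (ret) {
 *			tcf_idr_cleanup(tn, index);
 *			return ret;
 *		}
 *		ret = ACT_P_CREATED;
 *	} else if (err > 0) {		// existing action, refcount taken
 *		if (bind)
 *			return 0;
 *	} else {
 *		return err;
 *	}
 */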

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
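
/* Example (sketch): modules normally reach this through tc_action_net_exit()
 * from their pernet exit_batch callback, e.g. for a hypothetical act_foo:
 *
 *	static void __net_exit foo_exit_net(struct list_head *net_list)
 *	{
 *		tc_action_net_exit(net_list, foo_net_id);
 *	}
 */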

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Since the act ops id is stored in the pernet subsystem list, there is no
 * way to walk only the action subsystems, so we keep the tc action pernet
 * ops ids on a separate list for reoffload to walk through.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);
struct tc_act_pernet_id {
	struct list_head list;
	unsigned int id;
};

static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;
	int ret = 0;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			ret = -EEXIST;
			goto err_out;
		}
	}

	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
	if (!id_ptr) {
		ret = -ENOMEM;
		goto err_out;
	}
	id_ptr->id = id;

	list_add_tail(&id_ptr->list, &act_pernet_id_list);

err_out:
	mutex_unlock(&act_id_mutex);
	return ret;
}

static void tcf_pernet_del_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			list_del(&id_ptr->list);
			kfree(id_ptr);
			break;
		}
	}
	mutex_unlock(&act_id_mutex);
}

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * net namespace.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	if (ops->id) {
		ret = tcf_pernet_add_id_list(*ops->id);
		if (ret)
			goto err_id;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			ret = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;

err_out:
	write_unlock(&act_mod_lock);
	if (ops->id)
		tcf_pernet_del_id_list(*ops->id);
err_id:
	unregister_pernet_subsys(ops);
	return ret;
}
EXPORT_SYMBOL(tcf_register_action);
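
/* Example (sketch): a module registers its ops together with pernet ops so
 * each netns gets its own idrinfo, again for a hypothetical act_foo:
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_init_net,
 *		.exit_batch	= foo_exit_net,
 *		.id		= &foo_net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 */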

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err) {
		unregister_pernet_subsys(ops);
		if (ops->id)
			tcf_pernet_del_id_list(*ops->id);
	}
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a filter chains at most 32 actions; the mask
 * extracts the jump count from a TC_ACT_JUMP verdict.
 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		if (tc_act_skip_sw(a->tcfa_flags))
			continue;

		repeat_ttl = 32;
repeat:
		ret = a->ops->act(skb, a, res);
		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			ret = TC_ACT_OK;
		}
		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
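
/* Example of the jump encoding handled above: an action returning
 * (TC_ACT_JUMP | 2) makes the loop skip the next two actions in the array;
 * jmp_ttl bounds how many times the graph may restart so a cycle of jumps
 * cannot loop forever.
 */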

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 flags;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
	if (flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       flags, flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which enables all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
							TCA_ACT_FLAGS_SKIP_HW |
							TCA_ACT_FLAGS_SKIP_SW),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it is just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS]) {
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
			if (!tc_act_flags_valid(userflags.value)) {
				err = -EINVAL;
				goto err_out;
			}
		}

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
	} else {
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
				extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!police)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static bool tc_act_bind(u32 flags)
{
	return !!(flags & TCA_ACT_FLAGS_BIND);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size,
		    u32 flags, u32 fl_flags,
		    struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
					 extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
		if (tc_act_bind(flags)) {
			bool skip_sw = tc_skip_sw(fl_flags);
			bool skip_hw = tc_skip_hw(fl_flags);

			if (tc_act_bind(act->tcfa_flags))
				continue;
			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
				NL_SET_ERR_MSG(extack,
					       "Mismatch between action and filter offload flags");
				err = -EINVAL;
				goto err;
			}
		} else {
			err = tcf_action_offload_add(act, extack);
			if (tc_act_skip_sw(act->tcfa_flags) && err)
				goto err;
		}
	}

	/* We have to commit them all together, because if any error happened in
	 * between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
				       bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
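
/* Example (sketch): an action's ->act() hook typically records a matched
 * packet before returning its verdict; assuming an action struct that embeds
 * struct tc_action as "common", that looks roughly like:
 *
 *	tcf_action_update_stats(&act->common, skb->len, 1, 0, false);
 */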

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* update hw stats for this action */
	tcf_action_update_hw_stats(p);

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
				  &p->tcfa_bstats, false) < 0 ||
	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw, false) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* some idjot trying to flush unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
	size_t attr_size = tcf_action_fill_size(action);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	const struct tc_action_ops *ops = action->ops;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	ret = tcf_idr_release_unsafe(action);
	if (ret == ACT_P_DELETED) {
		module_put(ops->owner);
		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
	} else {
		kfree_skb(skb);
	}
	return ret;
}

int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
			    void *cb_priv, bool add)
{
	struct tc_act_pernet_id *id_ptr;
	struct tcf_idrinfo *idrinfo;
	struct tc_action_net *tn;
	struct tc_action *p;
	unsigned int act_id;
	unsigned long tmp;
	unsigned long id;
	struct idr *idr;
	struct net *net;
	int ret;

	if (!cb)
		return -EINVAL;

	down_read(&net_rwsem);
	mutex_lock(&act_id_mutex);

	for_each_net(net) {
		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
			act_id = id_ptr->id;
			tn = net_generic(net, act_id);
			if (!tn)
				continue;
			idrinfo = tn->idrinfo;
			if (!idrinfo)
				continue;

			mutex_lock(&idrinfo->lock);
			idr = &idrinfo->action_idr;
			idr_for_each_entry_ul(idr, p, tmp, id) {
				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
					continue;
				if (add) {
					tcf_action_offload_add_ex(p, NULL, cb,
								  cb_priv);
					continue;
				}

				/* cb unregister to update hw count */
				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
				if (ret < 0)
					continue;
				if (tc_act_skip_sw(p->tcfa_flags) &&
				    !tc_act_in_hw(p))
					tcf_reoffload_del_notify(net, p);
			}
			mutex_unlock(&idrinfo->lock);
		}
	}
	mutex_unlock(&act_id_mutex);
	up_read(&net_rwsem);

	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size,
				     extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, 0, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	u32 flags = 0;
	int ret = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);