/*
 * net/sched/act_police.c	Input police filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/skbuff.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/init.h>
21 #include <linux/slab.h>
22 #include <net/act_api.h>
23 #include <net/netlink.h>
24 #include <net/pkt_cls.h>
25 #include <net/tc_act/tc_police.h>
27 /* Each policer is serialized by its individual spinlock */
/* Per-netns ID under which this action's tc_action_net state is registered. */
static unsigned int police_net_id;
/* Forward declaration: the ops table is defined at the bottom of this file. */
static struct tc_action_ops act_police_ops;
32 static int tcf_police_walker(struct net *net, struct sk_buff *skb,
33 struct netlink_callback *cb, int type,
34 const struct tc_action_ops *ops,
35 struct netlink_ext_ack *extack)
37 struct tc_action_net *tn = net_generic(net, police_net_id);
39 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
42 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
43 [TCA_POLICE_RATE] = { .len = TC_RTAB_SIZE },
44 [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE },
45 [TCA_POLICE_AVRATE] = { .type = NLA_U32 },
46 [TCA_POLICE_RESULT] = { .type = NLA_U32 },
49 static int tcf_police_init(struct net *net, struct nlattr *nla,
50 struct nlattr *est, struct tc_action **a,
51 int ovr, int bind, bool rtnl_held,
53 struct netlink_ext_ack *extack)
55 int ret = 0, tcfp_result = TC_ACT_OK, err, size;
56 struct nlattr *tb[TCA_POLICE_MAX + 1];
57 struct tcf_chain *goto_ch = NULL;
58 struct tc_police *parm;
59 struct tcf_police *police;
60 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
61 struct tc_action_net *tn = net_generic(net, police_net_id);
62 struct tcf_police_params *new;
68 err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
73 if (tb[TCA_POLICE_TBF] == NULL)
75 size = nla_len(tb[TCA_POLICE_TBF]);
76 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
79 parm = nla_data(tb[TCA_POLICE_TBF]);
80 err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
88 ret = tcf_idr_create(tn, parm->index, NULL, a,
89 &act_police_ops, bind, true);
91 tcf_idr_cleanup(tn, parm->index);
95 spin_lock_init(&(to_police(*a)->tcfp_lock));
97 tcf_idr_release(*a, bind);
100 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
104 police = to_police(*a);
105 if (parm->rate.rate) {
107 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
111 if (parm->peakrate.rate) {
112 P_tab = qdisc_get_rtab(&parm->peakrate,
113 tb[TCA_POLICE_PEAKRATE], NULL);
120 err = gen_replace_estimator(&police->tcf_bstats,
121 police->common.cpu_bstats,
122 &police->tcf_rate_est,
127 } else if (tb[TCA_POLICE_AVRATE] &&
128 (ret == ACT_P_CREATED ||
129 !gen_estimator_active(&police->tcf_rate_est))) {
134 if (tb[TCA_POLICE_RESULT]) {
135 tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
136 if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
137 NL_SET_ERR_MSG(extack,
138 "goto chain not allowed on fallback");
144 new = kzalloc(sizeof(*new), GFP_KERNEL);
145 if (unlikely(!new)) {
150 /* No failure allowed after this point */
151 new->tcfp_result = tcfp_result;
152 new->tcfp_mtu = parm->mtu;
153 if (!new->tcfp_mtu) {
156 new->tcfp_mtu = 255 << R_tab->rate.cell_log;
159 new->rate_present = true;
160 psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
161 qdisc_put_rtab(R_tab);
163 new->rate_present = false;
166 new->peak_present = true;
167 psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
168 qdisc_put_rtab(P_tab);
170 new->peak_present = false;
173 new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
174 if (new->peak_present)
175 new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
178 if (tb[TCA_POLICE_AVRATE])
179 new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
181 spin_lock_bh(&police->tcf_lock);
182 spin_lock_bh(&police->tcfp_lock);
183 police->tcfp_t_c = ktime_get_ns();
184 police->tcfp_toks = new->tcfp_burst;
185 if (new->peak_present)
186 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
187 spin_unlock_bh(&police->tcfp_lock);
188 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
189 rcu_swap_protected(police->params,
191 lockdep_is_held(&police->tcf_lock));
192 spin_unlock_bh(&police->tcf_lock);
195 tcf_chain_put_by_act(goto_ch);
199 if (ret == ACT_P_CREATED)
200 tcf_idr_insert(tn, *a);
204 qdisc_put_rtab(P_tab);
205 qdisc_put_rtab(R_tab);
207 tcf_chain_put_by_act(goto_ch);
209 tcf_idr_release(*a, bind);
213 static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
214 struct tcf_result *res)
216 struct tcf_police *police = to_police(a);
217 struct tcf_police_params *p;
218 s64 now, toks, ptoks = 0;
221 tcf_lastuse_update(&police->tcf_tm);
222 bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
224 ret = READ_ONCE(police->tcf_action);
225 p = rcu_dereference_bh(police->params);
227 if (p->tcfp_ewma_rate) {
228 struct gnet_stats_rate_est64 sample;
230 if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
231 sample.bps >= p->tcfp_ewma_rate)
235 if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
236 if (!p->rate_present) {
237 ret = p->tcfp_result;
241 now = ktime_get_ns();
242 spin_lock_bh(&police->tcfp_lock);
243 toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
244 if (p->peak_present) {
245 ptoks = toks + police->tcfp_ptoks;
246 if (ptoks > p->tcfp_mtu_ptoks)
247 ptoks = p->tcfp_mtu_ptoks;
248 ptoks -= (s64)psched_l2t_ns(&p->peak,
251 toks += police->tcfp_toks;
252 if (toks > p->tcfp_burst)
253 toks = p->tcfp_burst;
254 toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
255 if ((toks|ptoks) >= 0) {
256 police->tcfp_t_c = now;
257 police->tcfp_toks = toks;
258 police->tcfp_ptoks = ptoks;
259 spin_unlock_bh(&police->tcfp_lock);
260 ret = p->tcfp_result;
263 spin_unlock_bh(&police->tcfp_lock);
267 qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
269 if (ret == TC_ACT_SHOT)
270 qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
275 static void tcf_police_cleanup(struct tc_action *a)
277 struct tcf_police *police = to_police(a);
278 struct tcf_police_params *p;
280 p = rcu_dereference_protected(police->params, 1);
285 static void tcf_police_stats_update(struct tc_action *a,
286 u64 bytes, u32 packets,
287 u64 lastuse, bool hw)
289 struct tcf_police *police = to_police(a);
290 struct tcf_t *tm = &police->tcf_tm;
292 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
294 _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
296 tm->lastuse = max_t(u64, tm->lastuse, lastuse);
299 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
302 unsigned char *b = skb_tail_pointer(skb);
303 struct tcf_police *police = to_police(a);
304 struct tcf_police_params *p;
305 struct tc_police opt = {
306 .index = police->tcf_index,
307 .refcnt = refcount_read(&police->tcf_refcnt) - ref,
308 .bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
312 spin_lock_bh(&police->tcf_lock);
313 opt.action = police->tcf_action;
314 p = rcu_dereference_protected(police->params,
315 lockdep_is_held(&police->tcf_lock));
316 opt.mtu = p->tcfp_mtu;
317 opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
319 psched_ratecfg_getrate(&opt.rate, &p->rate);
321 psched_ratecfg_getrate(&opt.peakrate, &p->peak);
322 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
323 goto nla_put_failure;
324 if (p->tcfp_result &&
325 nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
326 goto nla_put_failure;
327 if (p->tcfp_ewma_rate &&
328 nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
329 goto nla_put_failure;
331 t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
332 t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
333 t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
334 t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
335 if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
336 goto nla_put_failure;
337 spin_unlock_bh(&police->tcf_lock);
342 spin_unlock_bh(&police->tcf_lock);
347 static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
349 struct tc_action_net *tn = net_generic(net, police_net_id);
351 return tcf_idr_search(tn, a, index);
/* Module metadata. */
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
358 static struct tc_action_ops act_police_ops = {
361 .owner = THIS_MODULE,
362 .stats_update = tcf_police_stats_update,
363 .act = tcf_police_act,
364 .dump = tcf_police_dump,
365 .init = tcf_police_init,
366 .walk = tcf_police_walker,
367 .lookup = tcf_police_search,
368 .cleanup = tcf_police_cleanup,
369 .size = sizeof(struct tcf_police),
372 static __net_init int police_init_net(struct net *net)
374 struct tc_action_net *tn = net_generic(net, police_net_id);
376 return tc_action_net_init(tn, &act_police_ops);
379 static void __net_exit police_exit_net(struct list_head *net_list)
381 tc_action_net_exit(net_list, police_net_id);
384 static struct pernet_operations police_net_ops = {
385 .init = police_init_net,
386 .exit_batch = police_exit_net,
387 .id = &police_net_id,
388 .size = sizeof(struct tc_action_net),
391 static int __init police_init_module(void)
393 return tcf_register_action(&act_police_ops, &police_net_ops);
396 static void __exit police_cleanup_module(void)
398 tcf_unregister_action(&act_police_ops, &police_net_ops);
/* Wire the module entry/exit points into the kernel module loader. */
module_init(police_init_module);
module_exit(police_cleanup_module);