Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / net / sched / cls_matchall.c
1 /*
2  * net/sched/cls_matchll.c              Match-all classifier
3  *
4  * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15
16 #include <net/sch_generic.h>
17 #include <net/pkt_cls.h>
18
/* Per-tcf_proto state: matchall keeps at most one filter per classifier
 * instance, so the whole filter lives in this single head structure,
 * published via tp->root and read under RCU on the fast path.
 */
struct cls_mall_head {
        struct tcf_exts exts;   /* attached actions */
        struct tcf_result res;  /* cached classification result */
        u32 handle;             /* filter handle (defaults to 1) */
        u32 flags;              /* TCA_CLS_FLAGS_* (skip_sw/skip_hw/in_hw) */
        union {
                /* rcu is live first (grace period), then the same storage
                 * is reused as work to finish destruction under RTNL.
                 */
                struct work_struct work;
                struct rcu_head rcu;
        };
};
29
30 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
31                          struct tcf_result *res)
32 {
33         struct cls_mall_head *head = rcu_dereference_bh(tp->root);
34
35         if (tc_skip_sw(head->flags))
36                 return -1;
37
38         *res = head->res;
39         return tcf_exts_exec(skb, &head->exts, res);
40 }
41
/* Classifier instance init: matchall allocates nothing up front —
 * tp->root stays NULL until the first (and only) filter is added.
 */
static int mall_init(struct tcf_proto *tp)
{
        return 0;
}
46
/* Final teardown of a filter head. Caller must hold RTNL (action
 * destruction requires it); actions are released before the net
 * reference taken by tcf_exts_get_net() is dropped.
 */
static void __mall_destroy(struct cls_mall_head *head)
{
        tcf_exts_destroy(&head->exts);
        tcf_exts_put_net(&head->exts);
        kfree(head);
}
53
/* Workqueue stage of deferred destruction: runs in process context so
 * we can take RTNL, which __mall_destroy() requires.
 */
static void mall_destroy_work(struct work_struct *work)
{
        struct cls_mall_head *head = container_of(work, struct cls_mall_head,
                                                  work);
        rtnl_lock();
        __mall_destroy(head);
        rtnl_unlock();
}
62
/* RCU stage of deferred destruction: runs after readers are done, but
 * in softirq context where we cannot take RTNL — so bounce the final
 * free to a workqueue (work and rcu share storage in the union).
 */
static void mall_destroy_rcu(struct rcu_head *rcu)
{
        struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
                                                  rcu);

        INIT_WORK(&head->work, mall_destroy_work);
        tcf_queue_work(&head->work);
}
71
72 static int mall_replace_hw_filter(struct tcf_proto *tp,
73                                   struct cls_mall_head *head,
74                                   unsigned long cookie)
75 {
76         struct net_device *dev = tp->q->dev_queue->dev;
77         struct tc_cls_matchall_offload cls_mall = {};
78         int err;
79
80         tc_cls_common_offload_init(&cls_mall.common, tp);
81         cls_mall.command = TC_CLSMATCHALL_REPLACE;
82         cls_mall.exts = &head->exts;
83         cls_mall.cookie = cookie;
84
85         err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
86                                             &cls_mall);
87         if (!err)
88                 head->flags |= TCA_CLS_FLAGS_IN_HW;
89
90         return err;
91 }
92
93 static void mall_destroy_hw_filter(struct tcf_proto *tp,
94                                    struct cls_mall_head *head,
95                                    unsigned long cookie)
96 {
97         struct net_device *dev = tp->q->dev_queue->dev;
98         struct tc_cls_matchall_offload cls_mall = {};
99
100         tc_cls_common_offload_init(&cls_mall.common, tp);
101         cls_mall.command = TC_CLSMATCHALL_DESTROY;
102         cls_mall.cookie = cookie;
103
104         dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &cls_mall);
105 }
106
/* Destroy the classifier instance. Called under RTNL.
 *
 * Hardware state is torn down synchronously; the head itself is freed
 * after an RCU grace period (fast-path readers may still hold it),
 * unless the netns is already gone, in which case tcf_exts_get_net()
 * fails and we must free immediately.
 */
static void mall_destroy(struct tcf_proto *tp)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;

        /* No filter was ever installed (mall_init() allocates nothing). */
        if (!head)
                return;

        if (tc_should_offload(dev, head->flags))
                /* cookie is the head pointer, matching the replace call */
                mall_destroy_hw_filter(tp, head, (unsigned long) head);

        if (tcf_exts_get_net(&head->exts))
                call_rcu(&head->rcu, mall_destroy_rcu);
        else
                __mall_destroy(head);
}
123
/* Lookup by handle. matchall deliberately exposes no per-handle
 * retrieval; callers always get NULL.
 */
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
        return NULL;
}
128
129 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
130         [TCA_MATCHALL_UNSPEC]           = { .type = NLA_UNSPEC },
131         [TCA_MATCHALL_CLASSID]          = { .type = NLA_U32 },
132 };
133
134 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
135                           struct cls_mall_head *head,
136                           unsigned long base, struct nlattr **tb,
137                           struct nlattr *est, bool ovr)
138 {
139         int err;
140
141         err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr);
142         if (err < 0)
143                 return err;
144
145         if (tb[TCA_MATCHALL_CLASSID]) {
146                 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
147                 tcf_bind_filter(tp, &head->res, base);
148         }
149         return 0;
150 }
151
/* Create the matchall filter for this classifier instance.
 *
 * matchall supports exactly one filter per tp, so a second add fails
 * with -EEXIST; replacement of an existing filter is not supported
 * here. Called under RTNL. Returns 0 on success, negative errno on
 * failure (the new head is fully unwound on every error path).
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       void **arg, bool ovr)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
        struct cls_mall_head *new;
        u32 flags = 0;
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        /* Only one filter allowed per instance. */
        if (head)
                return -EEXIST;

        err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
                               mall_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_MATCHALL_FLAGS]) {
                flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
                /* rejects e.g. skip_sw together with skip_hw */
                if (!tc_flags_valid(flags))
                        return -EINVAL;
        }

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOBUFS;

        err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
        if (err)
                goto err_exts_init;

        if (!handle)
                handle = 1;
        new->handle = handle;
        new->flags = flags;

        err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto err_set_parms;

        if (tc_should_offload(dev, flags)) {
                /* cookie is the head pointer; mall_destroy() passes the
                 * same value on removal.
                 */
                err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        /* skip_sw means hardware-only: offload failure is
                         * fatal. Otherwise fall back to software silently.
                         */
                        if (tc_skip_sw(flags))
                                goto err_replace_hw_filter;
                        else
                                err = 0;
                }
        }

        if (!tc_in_hw(new->flags))
                new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        /* NOTE(review): head is known to be NULL here (the -EEXIST check
         * above), so *arg is set to NULL on create — presumably the
         * caller ignores *arg on a fresh insert; confirm against
         * tc_ctl_tfilter() before changing.
         */
        *arg = head;
        rcu_assign_pointer(tp->root, new);
        return 0;

err_replace_hw_filter:
err_set_parms:
        tcf_exts_destroy(&new->exts);
err_exts_init:
        kfree(new);
        return err;
}
222
/* Individual filter deletion is not supported: the single matchall
 * filter only goes away when the whole instance is destroyed.
 */
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last)
{
        return -EOPNOTSUPP;
}
227
228 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
229 {
230         struct cls_mall_head *head = rtnl_dereference(tp->root);
231
232         if (arg->count < arg->skip)
233                 goto skip;
234         if (arg->fn(tp, head, arg) < 0)
235                 arg->stop = 1;
236 skip:
237         arg->count++;
238 }
239
/* Dump one filter (@fh) into a netlink message.
 *
 * Emits a nested TCA_OPTIONS attribute holding classid, flags and the
 * attached actions, plus action statistics after the nest is closed.
 * Returns the skb length on success, -1 on overflow (with the partial
 * nest cancelled so the message stays well-formed).
 */
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
                     struct sk_buff *skb, struct tcmsg *t)
{
        struct cls_mall_head *head = fh;
        struct nlattr *nest;

        /* Nothing installed yet — nothing to dump. */
        if (!head)
                return skb->len;

        t->tcm_handle = head->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        /* classid 0 means "not set"; omit the attribute in that case. */
        if (head->res.classid &&
            nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;

        /* likewise only report flags when any are set */
        if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        /* stats live outside the options nest */
        if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
276
277 static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
278 {
279         struct cls_mall_head *head = fh;
280
281         if (head && head->res.classid == classid)
282                 head->res.class = cl;
283 }
284
/* tcf_proto_ops vtable registering matchall with the tc classifier
 * core under the "matchall" kind.
 */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
        .kind           = "matchall",
        .classify       = mall_classify,
        .init           = mall_init,
        .destroy        = mall_destroy,
        .get            = mall_get,
        .change         = mall_change,
        .delete         = mall_delete,
        .walk           = mall_walk,
        .dump           = mall_dump,
        .bind_class     = mall_bind_class,
        .owner          = THIS_MODULE,
};
298
/* Module entry point: register the classifier with the tc core. */
static int __init cls_mall_init(void)
{
        return register_tcf_proto_ops(&cls_mall_ops);
}
303
/* Module exit point: unregister the classifier from the tc core. */
static void __exit cls_mall_exit(void)
{
        unregister_tcf_proto_ops(&cls_mall_ops);
}
308
/* Standard module boilerplate. */
module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");