// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module, we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
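
/* Informal sketch (not part of this file) of how a classifier module uses
 * the registration API above; the "foo" kind and its ops are hypothetical:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		(.classify, .init, .destroy, ... set by the classifier)
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * A duplicate kind fails with -EEXIST. Autoloading in
 * tcf_proto_lookup_ops() relies on the module being named cls_<kind>
 * (see request_module("cls_%s", ...) above).
 */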

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
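
/* Illustrative sketch of the intended tcf_queue_work() usage pattern (the
 * "foo" filter type is hypothetical): a classifier embeds a struct rcu_work
 * in its filter and defers freeing until an RCU grace period has elapsed
 * and the work item has run on tc_filter_wq:
 *
 *	struct cls_foo_filter {
 *		...
 *		struct rcu_work rwork;
 *	};
 *
 *	static void cls_foo_destroy_filter_work(struct work_struct *work)
 *	{
 *		struct cls_foo_filter *f = container_of(to_rcu_work(work),
 *							struct cls_foo_filter,
 *							rwork);
 *		kfree(f);
 *	}
 *
 * and on filter removal:
 *
 *	tcf_queue_work(&f->rwork, cls_foo_destroy_filter_work);
 */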

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
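
/* Worked example (informal): with no existing tp, the first auto-allocated
 * prio is TC_H_MAJ(0xC0000000) == 0xC0000000, i.e. priority 0xC000 in the
 * upper 16 bits. Given a head tp with prio 0xC0000000, the next one is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, so auto-allocated priorities
 * count down from 0xC000.
 */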

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error, return false to take the rtnl lock. The proto
         * lookup/create functions will perform the lookup again and
         * properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
                              struct tcf_walker *arg)
{
        if (fh) {
                arg->nonempty = true;
                return -1;
        }
        return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
        struct tcf_walker walker = { .fn = walker_check_empty, };

        if (tp->ops->walk) {
                tp->ops->walk(tp, &walker, rtnl_held);
                return !walker.nonempty;
        }
        return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
        spin_lock(&tp->lock);
        if (tcf_proto_is_empty(tp, rtnl_held))
                tp->deleting = true;
        spin_unlock(&tp->lock);
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if the block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* If all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send a notification only when we get the first non-action
         * reference. Until then, the chain acts only as a placeholder
         * for actions pointing to it, and the user ought not to know
         * about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
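
/* Informal sketch of the action-side usage (cf. the goto_chain handling in
 * net/sched/act_api.c): an action that can "goto chain" takes a long-lived
 * action reference, which deliberately does not make the chain visible to
 * users and does not trigger an RTM_NEWCHAIN notification:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);	(on action teardown)
 */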

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding the block
         * lock. However, when the block is unlocked the chain can be
         * changed concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
                                  struct tcf_block *block,
                                  flow_indr_block_bind_cb_t *cb,
                                  void *cb_priv,
                                  enum flow_block_command command)
{
        struct flow_block_offload bo = {
                .command        = command,
                .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
                .net            = dev_net(dev),
                .block_shared   = tcf_block_non_null_shared(block),
        };
        INIT_LIST_HEAD(&bo.cb_list);

        if (!block)
                return;

        bo.block = &block->flow_block;

        down_write(&block->cb_lock);
        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

        tcf_block_setup(block, &bo);
        up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
        const struct Qdisc_class_ops *cops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        cops = qdisc->ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
                                          flow_indr_block_bind_cb_t *cb,
                                          void *cb_priv,
                                          enum flow_block_command command)
{
        struct tcf_block *block = tc_dev_ingress_block(dev);

        tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
                               struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum flow_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {
                .command        = command,
                .binder_type    = ei->binder_type,
                .net            = dev_net(dev),
                .block          = &block->flow_block,
                .block_shared   = tcf_block_shared(block),
                .extack         = extack,
        };
        INIT_LIST_HEAD(&bo.cb_list);

        flow_indr_block_call(dev, &bo, command);
        tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};
        int err;

        bo.net = dev_net(dev);
        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = &block->flow_block;
        bo.block_shared = tcf_block_shared(block);
        bo.extack = extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, forbid the bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block)) {
                err = -EOPNOTSUPP;
                goto err_unlock;
        }
        err = 0;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
        up_write(&block->cb_lock);
        return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means the next pointer could be invalid.
                 * Restart the search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
                   bool rtnl_held)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, rtnl_held, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
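
/* Informal sketch of the iteration pattern these two helpers support (see
 * also tcf_block_flush_all_chains() below). The reference hand-off means
 * the caller never needs an explicit put for entries the loop advances
 * past, only for an entry it breaks out on:
 *
 *	struct tcf_chain *chain;
 *	struct tcf_proto *tp;
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL, true); tp;
 *		     tp = tcf_get_next_proto(chain, tp, true))
 *			...;
 *	}
 */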

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = dev->qdisc;
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that the qdisc is not noop_qdisc,
         * which means that the qdisc holds a reference to the net_device
         * and we hold a reference to the qdisc, so it is safe to release
         * the rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Are we searching for a filter attached to a class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take a reference to the block in order to support
                 * execution of the cls API rules update path without the
                 * rtnl lock. The caller must release the block when finished
                 * using it. The 'if' branch of this conditional obtains its
                 * reference to the block by calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when the last chain is freed. However, if
                 * chain_list is empty, the block has to be manually
                 * deallocated. After the block reference counter has reached
                 * 0, it is no longer possible to increment it or add new
                 * chains to the block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
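
/* Informal usage sketch: a classifier whose filters dereference the skb dst
 * entry (as, e.g., the route classifier does) would call this from its init
 * path so that the devices owning the block keep dst entries around; the
 * "foo" classifier here is hypothetical:
 *
 *	static int cls_foo_init(struct tcf_proto *tp)
 *	{
 *		tcf_block_netif_keep_dst(tp->chain->block);
 *		return 0;
 *	}
 */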

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* block_index not 0 means the shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
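
/* Informal sketch of the qdisc-side pairing of tcf_block_get() and
 * tcf_block_put() ("foo_sched_data" is hypothetical); classful qdiscs
 * follow this pattern once per class or per attach point:
 *
 *	struct foo_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */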

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        lockdep_assert_held(&block->cb_lock);

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}

static int tcf_block_bind(struct tcf_block *block,
                          struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;
        int err, i = 0;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry(block_cb, &bo->cb_list, list) {
                err = tcf_block_playback_offloads(block, block_cb->cb,
                                                  block_cb->cb_priv, true,
                                                  tcf_block_offload_in_use(block),
                                                  bo->extack);
                if (err)
                        goto err_unroll;
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt++;

                i++;
        }
        list_splice(&bo->cb_list, &block->flow_block.cb_list);

        return 0;

err_unroll:
        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                if (i-- > 0) {
                        list_del(&block_cb->list);
                        tcf_block_playback_offloads(block, block_cb->cb,
                                                    block_cb->cb_priv, false,
                                                    tcf_block_offload_in_use(block),
                                                    NULL);
                        if (!bo->unlocked_driver_cb)
                                block->lockeddevcnt--;
                }
                flow_block_cb_free(block_cb);
        }

        return err;
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                tcf_block_playback_offloads(block, block_cb->cb,
                                            block_cb->cb_priv, false,
                                            tcf_block_offload_in_use(block),
                                            NULL);
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt--;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo)
{
        int err;

        switch (bo->command) {
        case FLOW_BLOCK_BIND:
                err = tcf_block_bind(block, bo);
                break;
        case FLOW_BLOCK_UNBIND:
                err = 0;
                tcf_block_unbind(block, bo);
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
        const int max_reclassify_loop = 4;
        const struct tcf_proto *orig_tp = tp;
        const struct tcf_proto *first_tp;
        int limit = 0;

reclassify:
#endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
                __be16 protocol = tc_skb_protocol(skb);
                int err;

                if (tp->protocol != protocol &&
                    tp->protocol != htons(ETH_P_ALL))
                        continue;

                err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
                        first_tp = orig_tp;
                        goto reset;
                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
                        first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        {
                                struct tc_skb_ext *ext;

                                ext = skb_ext_add(skb, TC_SKB_EXT);
                                if (WARN_ON_ONCE(!ext))
                                        return TC_ACT_SHOT;

                                ext->chain = err & TC_ACT_EXT_VAL_MASK;
                        }
#endif
                        goto reset;
                }
#endif
                if (err >= 0)
                        return err;
        }

        return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
        if (unlikely(limit++ >= max_reclassify_loop)) {
                net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
                                       tp->chain->block->index,
                                       tp->prio & 0xffff,
                                       ntohs(tp->protocol));
                return TC_ACT_SHOT;
        }

        tp = first_tp;
        goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
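
/* Informal sketch of a caller (the "foo" qdisc is hypothetical): a qdisc's
 * classify/enqueue path typically runs the filter chain and maps the
 * verdict onto its own queueing decision:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *		return NULL;		(drop the packet)
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		return NULL;		(consumed by an action)
 *	default:
 *		break;			(res.classid selects the class)
 *	}
 */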
1552
1553 struct tcf_chain_info {
1554         struct tcf_proto __rcu **pprev;
1555         struct tcf_proto __rcu *next;
1556 };
1557
1558 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1559                                            struct tcf_chain_info *chain_info)
1560 {
1561         return tcf_chain_dereference(*chain_info->pprev, chain);
1562 }
1563
1564 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1565                                struct tcf_chain_info *chain_info,
1566                                struct tcf_proto *tp)
1567 {
1568         if (chain->flushing)
1569                 return -EAGAIN;
1570
1571         if (*chain_info->pprev == chain->filter_chain)
1572                 tcf_chain0_head_change(chain, tp);
1573         tcf_proto_get(tp);
1574         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1575         rcu_assign_pointer(*chain_info->pprev, tp);
1576
1577         return 0;
1578 }
1579
1580 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1581                                 struct tcf_chain_info *chain_info,
1582                                 struct tcf_proto *tp)
1583 {
1584         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1585
1586         tcf_proto_mark_delete(tp);
1587         if (tp == chain->filter_chain)
1588                 tcf_chain0_head_change(chain, next);
1589         RCU_INIT_POINTER(*chain_info->pprev, next);
1590 }
1591
1592 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1593                                            struct tcf_chain_info *chain_info,
1594                                            u32 protocol, u32 prio,
1595                                            bool prio_allocate);
1596
1597 /* Try to insert a new proto.
1598  * If a proto with the specified priority already exists, free the new
1599  * proto and return the existing one.
1600  */
1601
1602 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1603                                                     struct tcf_proto *tp_new,
1604                                                     u32 protocol, u32 prio,
1605                                                     bool rtnl_held)
1606 {
1607         struct tcf_chain_info chain_info;
1608         struct tcf_proto *tp;
1609         int err = 0;
1610
1611         mutex_lock(&chain->filter_chain_lock);
1612
1613         tp = tcf_chain_tp_find(chain, &chain_info,
1614                                protocol, prio, false);
1615         if (!tp)
1616                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1617         mutex_unlock(&chain->filter_chain_lock);
1618
1619         if (tp) {
1620                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1621                 tp_new = tp;
1622         } else if (err) {
1623                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1624                 tp_new = ERR_PTR(err);
1625         }
1626
1627         return tp_new;
1628 }
1629
1630 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1631                                       struct tcf_proto *tp, bool rtnl_held,
1632                                       struct netlink_ext_ack *extack)
1633 {
1634         struct tcf_chain_info chain_info;
1635         struct tcf_proto *tp_iter;
1636         struct tcf_proto **pprev;
1637         struct tcf_proto *next;
1638
1639         mutex_lock(&chain->filter_chain_lock);
1640
1641         /* Atomically find and remove tp from chain. */
1642         for (pprev = &chain->filter_chain;
1643              (tp_iter = tcf_chain_dereference(*pprev, chain));
1644              pprev = &tp_iter->next) {
1645                 if (tp_iter == tp) {
1646                         chain_info.pprev = pprev;
1647                         chain_info.next = tp_iter->next;
1648                         WARN_ON(tp_iter->deleting);
1649                         break;
1650                 }
1651         }
1652         /* Verify that tp still exists and no new filters were inserted
1653          * concurrently.
1654          * Mark tp for deletion if it is empty.
1655          */
1656         if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
1657                 mutex_unlock(&chain->filter_chain_lock);
1658                 return;
1659         }
1660
1661         next = tcf_chain_dereference(chain_info.next, chain);
1662         if (tp == chain->filter_chain)
1663                 tcf_chain0_head_change(chain, next);
1664         RCU_INIT_POINTER(*chain_info.pprev, next);
1665         mutex_unlock(&chain->filter_chain_lock);
1666
1667         tcf_proto_put(tp, rtnl_held, extack);
1668 }
1669
1670 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1671                                            struct tcf_chain_info *chain_info,
1672                                            u32 protocol, u32 prio,
1673                                            bool prio_allocate)
1674 {
1675         struct tcf_proto **pprev;
1676         struct tcf_proto *tp;
1677
1678         /* Check the chain for an existing proto-tcf with this priority */
1679         for (pprev = &chain->filter_chain;
1680              (tp = tcf_chain_dereference(*pprev, chain));
1681              pprev = &tp->next) {
1682                 if (tp->prio >= prio) {
1683                         if (tp->prio == prio) {
1684                                 if (prio_allocate ||
1685                                     (tp->protocol != protocol && protocol))
1686                                         return ERR_PTR(-EINVAL);
1687                         } else {
1688                                 tp = NULL;
1689                         }
1690                         break;
1691                 }
1692         }
1693         chain_info->pprev = pprev;
1694         if (tp) {
1695                 chain_info->next = tp->next;
1696                 tcf_proto_get(tp);
1697         } else {
1698                 chain_info->next = NULL;
1699         }
1700         return tp;
1701 }
1702
1703 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1704                          struct tcf_proto *tp, struct tcf_block *block,
1705                          struct Qdisc *q, u32 parent, void *fh,
1706                          u32 portid, u32 seq, u16 flags, int event,
1707                          bool rtnl_held)
1708 {
1709         struct tcmsg *tcm;
1710         struct nlmsghdr  *nlh;
1711         unsigned char *b = skb_tail_pointer(skb);
1712
1713         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1714         if (!nlh)
1715                 goto out_nlmsg_trim;
1716         tcm = nlmsg_data(nlh);
1717         tcm->tcm_family = AF_UNSPEC;
1718         tcm->tcm__pad1 = 0;
1719         tcm->tcm__pad2 = 0;
1720         if (q) {
1721                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1722                 tcm->tcm_parent = parent;
1723         } else {
1724                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1725                 tcm->tcm_block_index = block->index;
1726         }
1727         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1728         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1729                 goto nla_put_failure;
1730         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1731                 goto nla_put_failure;
1732         if (!fh) {
1733                 tcm->tcm_handle = 0;
1734         } else {
1735                 if (tp->ops->dump &&
1736                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1737                         goto nla_put_failure;
1738         }
1739         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1740         return skb->len;
1741
1742 out_nlmsg_trim:
1743 nla_put_failure:
1744         nlmsg_trim(skb, b);
1745         return -1;
1746 }
1747
1748 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1749                           struct nlmsghdr *n, struct tcf_proto *tp,
1750                           struct tcf_block *block, struct Qdisc *q,
1751                           u32 parent, void *fh, int event, bool unicast,
1752                           bool rtnl_held)
1753 {
1754         struct sk_buff *skb;
1755         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1756         int err = 0;
1757
1758         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1759         if (!skb)
1760                 return -ENOBUFS;
1761
1762         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1763                           n->nlmsg_seq, n->nlmsg_flags, event,
1764                           rtnl_held) <= 0) {
1765                 kfree_skb(skb);
1766                 return -EINVAL;
1767         }
1768
1769         if (unicast)
1770                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1771         else
1772                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1773                                      n->nlmsg_flags & NLM_F_ECHO);
1774
1775         if (err > 0)
1776                 err = 0;
1777         return err;
1778 }
1779
1780 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1781                               struct nlmsghdr *n, struct tcf_proto *tp,
1782                               struct tcf_block *block, struct Qdisc *q,
1783                               u32 parent, void *fh, bool unicast, bool *last,
1784                               bool rtnl_held, struct netlink_ext_ack *extack)
1785 {
1786         struct sk_buff *skb;
1787         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1788         int err;
1789
1790         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1791         if (!skb)
1792                 return -ENOBUFS;
1793
1794         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1795                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1796                           rtnl_held) <= 0) {
1797                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1798                 kfree_skb(skb);
1799                 return -EINVAL;
1800         }
1801
1802         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1803         if (err) {
1804                 kfree_skb(skb);
1805                 return err;
1806         }
1807
1808         if (unicast)
1809                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1810         else
1811                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1812                                      n->nlmsg_flags & NLM_F_ECHO);
1813         if (err < 0)
1814                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1815
1816         if (err > 0)
1817                 err = 0;
1818         return err;
1819 }
1820
1821 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1822                                  struct tcf_block *block, struct Qdisc *q,
1823                                  u32 parent, struct nlmsghdr *n,
1824                                  struct tcf_chain *chain, int event,
1825                                  bool rtnl_held)
1826 {
1827         struct tcf_proto *tp;
1828
1829         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1830              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1831                 tfilter_notify(net, oskb, n, tp, block,
1832                                q, parent, NULL, event, false, rtnl_held);
1833 }
1834
1835 static void tfilter_put(struct tcf_proto *tp, void *fh)
1836 {
1837         if (tp->ops->put && fh)
1838                 tp->ops->put(tp, fh);
1839 }
1840
1841 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1842                           struct netlink_ext_ack *extack)
1843 {
1844         struct net *net = sock_net(skb->sk);
1845         struct nlattr *tca[TCA_MAX + 1];
1846         struct tcmsg *t;
1847         u32 protocol;
1848         u32 prio;
1849         bool prio_allocate;
1850         u32 parent;
1851         u32 chain_index;
1852         struct Qdisc *q = NULL;
1853         struct tcf_chain_info chain_info;
1854         struct tcf_chain *chain = NULL;
1855         struct tcf_block *block;
1856         struct tcf_proto *tp;
1857         unsigned long cl;
1858         void *fh;
1859         int err;
1860         int tp_created;
1861         bool rtnl_held = false;
1862
1863         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1864                 return -EPERM;
1865
1866 replay:
1867         tp_created = 0;
1868
1869         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1870                                      rtm_tca_policy, extack);
1871         if (err < 0)
1872                 return err;
1873
1874         t = nlmsg_data(n);
1875         protocol = TC_H_MIN(t->tcm_info);
1876         prio = TC_H_MAJ(t->tcm_info);
1877         prio_allocate = false;
1878         parent = t->tcm_parent;
1879         tp = NULL;
1880         cl = 0;
1881         block = NULL;
1882
1883         if (prio == 0) {
1884                 /* If no priority is provided by the user,
1885                  * we allocate one.
1886                  */
1887                 if (n->nlmsg_flags & NLM_F_CREATE) {
1888                         prio = TC_H_MAKE(0x80000000U, 0U);
1889                         prio_allocate = true;
1890                 } else {
1891                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1892                         return -ENOENT;
1893                 }
1894         }
1895
1896         /* Find head of filter chain. */
1897
1898         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1899         if (err)
1900                 return err;
1901
1902         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
1903          * the block is shared (no qdisc found), the qdisc is not unlocked,
1904          * the classifier type is not specified, or the classifier is not unlocked.
1905          */
1906         if (rtnl_held ||
1907             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1908             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
1909                 rtnl_held = true;
1910                 rtnl_lock();
1911         }
1912
1913         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
1914         if (err)
1915                 goto errout;
1916
1917         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
1918                                  extack);
1919         if (IS_ERR(block)) {
1920                 err = PTR_ERR(block);
1921                 goto errout;
1922         }
1923
1924         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1925         if (chain_index > TC_ACT_EXT_VAL_MASK) {
1926                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1927                 err = -EINVAL;
1928                 goto errout;
1929         }
1930         chain = tcf_chain_get(block, chain_index, true);
1931         if (!chain) {
1932                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
1933                 err = -ENOMEM;
1934                 goto errout;
1935         }
1936
1937         mutex_lock(&chain->filter_chain_lock);
1938         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1939                                prio, prio_allocate);
1940         if (IS_ERR(tp)) {
1941                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
1942                 err = PTR_ERR(tp);
1943                 goto errout_locked;
1944         }
1945
1946         if (tp == NULL) {
1947                 struct tcf_proto *tp_new = NULL;
1948
1949                 if (chain->flushing) {
1950                         err = -EAGAIN;
1951                         goto errout_locked;
1952                 }
1953
1954                 /* Proto-tcf does not exist, create a new one */
1955
1956                 if (tca[TCA_KIND] == NULL || !protocol) {
1957                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
1958                         err = -EINVAL;
1959                         goto errout_locked;
1960                 }
1961
1962                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1963                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
1964                         err = -ENOENT;
1965                         goto errout_locked;
1966                 }
1967
1968                 if (prio_allocate)
1969                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
1970                                                                &chain_info));
1971
1972                 mutex_unlock(&chain->filter_chain_lock);
1973                 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
1974                                           protocol, prio, chain, rtnl_held,
1975                                           extack);
1976                 if (IS_ERR(tp_new)) {
1977                         err = PTR_ERR(tp_new);
1978                         goto errout_tp;
1979                 }
1980
1981                 tp_created = 1;
1982                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
1983                                                 rtnl_held);
1984                 if (IS_ERR(tp)) {
1985                         err = PTR_ERR(tp);
1986                         goto errout_tp;
1987                 }
1988         } else {
1989                 mutex_unlock(&chain->filter_chain_lock);
1990         }
1991
1992         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1993                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1994                 err = -EINVAL;
1995                 goto errout;
1996         }
1997
1998         fh = tp->ops->get(tp, t->tcm_handle);
1999
2000         if (!fh) {
2001                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2002                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2003                         err = -ENOENT;
2004                         goto errout;
2005                 }
2006         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2007                 tfilter_put(tp, fh);
2008                 NL_SET_ERR_MSG(extack, "Filter already exists");
2009                 err = -EEXIST;
2010                 goto errout;
2011         }
2012
2013         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2014                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2015                 err = -EINVAL;
2016                 goto errout;
2017         }
2018
2019         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2020                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2021                               rtnl_held, extack);
2022         if (err == 0) {
2023                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2024                                RTM_NEWTFILTER, false, rtnl_held);
2025                 tfilter_put(tp, fh);
2026                 /* q pointer is NULL for shared blocks */
2027                 if (q)
2028                         q->flags &= ~TCQ_F_CAN_BYPASS;
2029         }
2030
2031 errout:
2032         if (err && tp_created)
2033                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2034 errout_tp:
2035         if (chain) {
2036                 if (tp && !IS_ERR(tp))
2037                         tcf_proto_put(tp, rtnl_held, NULL);
2038                 if (!tp_created)
2039                         tcf_chain_put(chain);
2040         }
2041         tcf_block_release(q, block, rtnl_held);
2042
2043         if (rtnl_held)
2044                 rtnl_unlock();
2045
2046         if (err == -EAGAIN) {
2047                 /* Take the rtnl lock in case the EAGAIN was caused by a
2048                  * concurrent flush of the target chain.
2049                  */
2050                 rtnl_held = true;
2051                 /* Replay the request. */
2052                 goto replay;
2053         }
2054         return err;
2055
2056 errout_locked:
2057         mutex_unlock(&chain->filter_chain_lock);
2058         goto errout;
2059 }
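
/*
 * For reference, the TC_H_MAJ()/TC_H_MIN() split that tc_new_tfilter()
 * undoes above is produced on the request side roughly as sketched below
 * (a hedged illustration of what iproute2 effectively does; the helper
 * name is made up). The priority occupies the upper 16 bits of tcm_info,
 * the network-byte-order ETH_P_* protocol the lower 16, and prio == 0
 * together with NLM_F_CREATE asks the kernel to auto-allocate a priority.
 */
static inline void example_fill_tcm_info(struct tcmsg *t, u16 prio,
					 u16 be_protocol)
{
	/* e.g. be_protocol = htons(ETH_P_IP) */
	t->tcm_info = TC_H_MAKE((u32)prio << 16, be_protocol);
}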
2060
2061 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2062                           struct netlink_ext_ack *extack)
2063 {
2064         struct net *net = sock_net(skb->sk);
2065         struct nlattr *tca[TCA_MAX + 1];
2066         struct tcmsg *t;
2067         u32 protocol;
2068         u32 prio;
2069         u32 parent;
2070         u32 chain_index;
2071         struct Qdisc *q = NULL;
2072         struct tcf_chain_info chain_info;
2073         struct tcf_chain *chain = NULL;
2074         struct tcf_block *block = NULL;
2075         struct tcf_proto *tp = NULL;
2076         unsigned long cl = 0;
2077         void *fh = NULL;
2078         int err;
2079         bool rtnl_held = false;
2080
2081         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2082                 return -EPERM;
2083
2084         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2085                                      rtm_tca_policy, extack);
2086         if (err < 0)
2087                 return err;
2088
2089         t = nlmsg_data(n);
2090         protocol = TC_H_MIN(t->tcm_info);
2091         prio = TC_H_MAJ(t->tcm_info);
2092         parent = t->tcm_parent;
2093
2094         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2095                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2096                 return -ENOENT;
2097         }
2098
2099         /* Find head of filter chain. */
2100
2101         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2102         if (err)
2103                 return err;
2104
2105         /* Take the rtnl mutex if flushing the whole chain, the block is
2106          * shared (no qdisc found), the qdisc is not unlocked, the classifier
2107          * type is not specified, or the classifier is not unlocked.
2108          */
2109         if (!prio ||
2110             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2111             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2112                 rtnl_held = true;
2113                 rtnl_lock();
2114         }
2115
2116         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2117         if (err)
2118                 goto errout;
2119
2120         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2121                                  extack);
2122         if (IS_ERR(block)) {
2123                 err = PTR_ERR(block);
2124                 goto errout;
2125         }
2126
2127         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2128         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2129                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2130                 err = -EINVAL;
2131                 goto errout;
2132         }
2133         chain = tcf_chain_get(block, chain_index, false);
2134         if (!chain) {
2135                 /* The user requested a flush on a non-existent chain. Nothing
2136                  * to do, so just return success.
2137                  */
2138                 if (prio == 0) {
2139                         err = 0;
2140                         goto errout;
2141                 }
2142                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2143                 err = -ENOENT;
2144                 goto errout;
2145         }
2146
2147         if (prio == 0) {
2148                 tfilter_notify_chain(net, skb, block, q, parent, n,
2149                                      chain, RTM_DELTFILTER, rtnl_held);
2150                 tcf_chain_flush(chain, rtnl_held);
2151                 err = 0;
2152                 goto errout;
2153         }
2154
2155         mutex_lock(&chain->filter_chain_lock);
2156         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2157                                prio, false);
2158         if (!tp || IS_ERR(tp)) {
2159                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2160                 err = tp ? PTR_ERR(tp) : -ENOENT;
2161                 goto errout_locked;
2162         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2163                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2164                 err = -EINVAL;
2165                 goto errout_locked;
2166         } else if (t->tcm_handle == 0) {
2167                 tcf_chain_tp_remove(chain, &chain_info, tp);
2168                 mutex_unlock(&chain->filter_chain_lock);
2169
2170                 tcf_proto_put(tp, rtnl_held, NULL);
2171                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2172                                RTM_DELTFILTER, false, rtnl_held);
2173                 err = 0;
2174                 goto errout;
2175         }
2176         mutex_unlock(&chain->filter_chain_lock);
2177
2178         fh = tp->ops->get(tp, t->tcm_handle);
2179
2180         if (!fh) {
2181                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2182                 err = -ENOENT;
2183         } else {
2184                 bool last;
2185
2186                 err = tfilter_del_notify(net, skb, n, tp, block,
2187                                          q, parent, fh, false, &last,
2188                                          rtnl_held, extack);
2189
2190                 if (err)
2191                         goto errout;
2192                 if (last)
2193                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2194         }
2195
2196 errout:
2197         if (chain) {
2198                 if (tp && !IS_ERR(tp))
2199                         tcf_proto_put(tp, rtnl_held, NULL);
2200                 tcf_chain_put(chain);
2201         }
2202         tcf_block_release(q, block, rtnl_held);
2203
2204         if (rtnl_held)
2205                 rtnl_unlock();
2206
2207         return err;
2208
2209 errout_locked:
2210         mutex_unlock(&chain->filter_chain_lock);
2211         goto errout;
2212 }
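
/*
 * The flush path above is selected purely by request shape: priority zero
 * and no protocol, handle or kind. A hedged sketch of such an
 * RTM_DELTFILTER request body (the helper is hypothetical; TCA_CHAIN
 * would be appended as an attribute to flush a specific chain):
 */
static inline void example_fill_flush_request(struct tcmsg *t, int ifindex,
					      u32 parent)
{
	t->tcm_family = AF_UNSPEC;
	t->tcm_ifindex = ifindex;
	t->tcm_parent = parent;
	t->tcm_info = 0;	/* prio == 0, protocol == 0: flush */
	t->tcm_handle = 0;
}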
2213
2214 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2215                           struct netlink_ext_ack *extack)
2216 {
2217         struct net *net = sock_net(skb->sk);
2218         struct nlattr *tca[TCA_MAX + 1];
2219         struct tcmsg *t;
2220         u32 protocol;
2221         u32 prio;
2222         u32 parent;
2223         u32 chain_index;
2224         struct Qdisc *q = NULL;
2225         struct tcf_chain_info chain_info;
2226         struct tcf_chain *chain = NULL;
2227         struct tcf_block *block = NULL;
2228         struct tcf_proto *tp = NULL;
2229         unsigned long cl = 0;
2230         void *fh = NULL;
2231         int err;
2232         bool rtnl_held = false;
2233
2234         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2235                                      rtm_tca_policy, extack);
2236         if (err < 0)
2237                 return err;
2238
2239         t = nlmsg_data(n);
2240         protocol = TC_H_MIN(t->tcm_info);
2241         prio = TC_H_MAJ(t->tcm_info);
2242         parent = t->tcm_parent;
2243
2244         if (prio == 0) {
2245                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2246                 return -ENOENT;
2247         }
2248
2249         /* Find head of filter chain. */
2250
2251         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2252         if (err)
2253                 return err;
2254
2255         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2256          * qdisc is not unlocked, the classifier type is not specified, or
2257          * the classifier is not unlocked.
2258          */
2259         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2260             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2261                 rtnl_held = true;
2262                 rtnl_lock();
2263         }
2264
2265         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2266         if (err)
2267                 goto errout;
2268
2269         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2270                                  extack);
2271         if (IS_ERR(block)) {
2272                 err = PTR_ERR(block);
2273                 goto errout;
2274         }
2275
2276         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2277         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2278                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2279                 err = -EINVAL;
2280                 goto errout;
2281         }
2282         chain = tcf_chain_get(block, chain_index, false);
2283         if (!chain) {
2284                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2285                 err = -EINVAL;
2286                 goto errout;
2287         }
2288
2289         mutex_lock(&chain->filter_chain_lock);
2290         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2291                                prio, false);
2292         mutex_unlock(&chain->filter_chain_lock);
2293         if (!tp || IS_ERR(tp)) {
2294                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2295                 err = tp ? PTR_ERR(tp) : -ENOENT;
2296                 goto errout;
2297         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2298                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2299                 err = -EINVAL;
2300                 goto errout;
2301         }
2302
2303         fh = tp->ops->get(tp, t->tcm_handle);
2304
2305         if (!fh) {
2306                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2307                 err = -ENOENT;
2308         } else {
2309                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2310                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2311                 if (err < 0)
2312                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2313         }
2314
2315         tfilter_put(tp, fh);
2316 errout:
2317         if (chain) {
2318                 if (tp && !IS_ERR(tp))
2319                         tcf_proto_put(tp, rtnl_held, NULL);
2320                 tcf_chain_put(chain);
2321         }
2322         tcf_block_release(q, block, rtnl_held);
2323
2324         if (rtnl_held)
2325                 rtnl_unlock();
2326
2327         return err;
2328 }
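
/*
 * The rtnl-conditional logic in the three handlers above keys off two
 * flags: QDISC_CLASS_OPS_DOIT_UNLOCKED on the qdisc's class ops and
 * TCF_PROTO_OPS_DOIT_UNLOCKED on the classifier's proto ops. A hedged
 * sketch of the classifier side; the "sample" kind and all of its
 * callbacks are hypothetical, hence the #if 0:
 */
#if 0
static struct tcf_proto_ops cls_sample_ops __read_mostly = {
	.kind		= "sample",
	.classify	= sample_classify,
	.init		= sample_init,
	.destroy	= sample_destroy,
	.get		= sample_get,
	.change		= sample_change,
	.delete		= sample_delete,
	.walk		= sample_walk,
	.dump		= sample_dump,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
	.owner		= THIS_MODULE,
};
#endif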
2329
2330 struct tcf_dump_args {
2331         struct tcf_walker w;
2332         struct sk_buff *skb;
2333         struct netlink_callback *cb;
2334         struct tcf_block *block;
2335         struct Qdisc *q;
2336         u32 parent;
2337 };
2338
2339 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2340 {
2341         struct tcf_dump_args *a = (void *)arg;
2342         struct net *net = sock_net(a->skb->sk);
2343
2344         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2345                              n, NETLINK_CB(a->cb->skb).portid,
2346                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2347                              RTM_NEWTFILTER, true);
2348 }
2349
2350 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2351                            struct sk_buff *skb, struct netlink_callback *cb,
2352                            long index_start, long *p_index)
2353 {
2354         struct net *net = sock_net(skb->sk);
2355         struct tcf_block *block = chain->block;
2356         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2357         struct tcf_proto *tp, *tp_prev;
2358         struct tcf_dump_args arg;
2359
2360         for (tp = __tcf_get_next_proto(chain, NULL);
2361              tp;
2362              tp_prev = tp,
2363                      tp = __tcf_get_next_proto(chain, tp),
2364                      tcf_proto_put(tp_prev, true, NULL),
2365                      (*p_index)++) {
2366                 if (*p_index < index_start)
2367                         continue;
2368                 if (TC_H_MAJ(tcm->tcm_info) &&
2369                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2370                         continue;
2371                 if (TC_H_MIN(tcm->tcm_info) &&
2372                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2373                         continue;
2374                 if (*p_index > index_start)
2375                         memset(&cb->args[1], 0,
2376                                sizeof(cb->args) - sizeof(cb->args[0]));
2377                 if (cb->args[1] == 0) {
2378                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2379                                           NETLINK_CB(cb->skb).portid,
2380                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2381                                           RTM_NEWTFILTER, true) <= 0)
2382                                 goto errout;
2383                         cb->args[1] = 1;
2384                 }
2385                 if (!tp->ops->walk)
2386                         continue;
2387                 arg.w.fn = tcf_node_dump;
2388                 arg.skb = skb;
2389                 arg.cb = cb;
2390                 arg.block = block;
2391                 arg.q = q;
2392                 arg.parent = parent;
2393                 arg.w.stop = 0;
2394                 arg.w.skip = cb->args[1] - 1;
2395                 arg.w.count = 0;
2396                 arg.w.cookie = cb->args[2];
2397                 tp->ops->walk(tp, &arg.w, true);
2398                 cb->args[2] = arg.w.cookie;
2399                 cb->args[1] = arg.w.count + 1;
2400                 if (arg.w.stop)
2401                         goto errout;
2402         }
2403         return true;
2404
2405 errout:
2406         tcf_proto_put(tp, true, NULL);
2407         return false;
2408 }
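
/*
 * The walker contract relied upon above: tp->ops->walk() must call
 * arg->fn for every filter, honour arg->skip via arg->count, and set
 * arg->stop when the callback returns a negative value. A hedged sketch
 * of the classifier side (the head/filter types are hypothetical;
 * cls_basic follows essentially this shape):
 */
#if 0
static void sample_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct sample_head *head = rtnl_dereference(tp->root);
	struct sample_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
#endif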
2409
2410 /* called with RTNL */
2411 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2412 {
2413         struct tcf_chain *chain, *chain_prev;
2414         struct net *net = sock_net(skb->sk);
2415         struct nlattr *tca[TCA_MAX + 1];
2416         struct Qdisc *q = NULL;
2417         struct tcf_block *block;
2418         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2419         long index_start;
2420         long index;
2421         u32 parent;
2422         int err;
2423
2424         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2425                 return skb->len;
2426
2427         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2428                                      NULL, cb->extack);
2429         if (err)
2430                 return err;
2431
2432         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2433                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2434                 if (!block)
2435                         goto out;
2436                 /* If we work with a block index, q is NULL and the parent
2437                  * value will never be used in the following code. The check
2438                  * in tcf_fill_node prevents it. However, the compiler cannot
2439                  * see that far ahead, so set parent to zero to silence the
2440                  * warning about parent being uninitialized.
2441                  */
2442                 parent = 0;
2443         } else {
2444                 const struct Qdisc_class_ops *cops;
2445                 struct net_device *dev;
2446                 unsigned long cl = 0;
2447
2448                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2449                 if (!dev)
2450                         return skb->len;
2451
2452                 parent = tcm->tcm_parent;
2453                 if (!parent) {
2454                         q = dev->qdisc;
2455                         parent = q->handle;
2456                 } else {
2457                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2458                 }
2459                 if (!q)
2460                         goto out;
2461                 cops = q->ops->cl_ops;
2462                 if (!cops)
2463                         goto out;
2464                 if (!cops->tcf_block)
2465                         goto out;
2466                 if (TC_H_MIN(tcm->tcm_parent)) {
2467                         cl = cops->find(q, tcm->tcm_parent);
2468                         if (cl == 0)
2469                                 goto out;
2470                 }
2471                 block = cops->tcf_block(q, cl, NULL);
2472                 if (!block)
2473                         goto out;
2474                 if (tcf_block_shared(block))
2475                         q = NULL;
2476         }
2477
2478         index_start = cb->args[0];
2479         index = 0;
2480
2481         for (chain = __tcf_get_next_chain(block, NULL);
2482              chain;
2483              chain_prev = chain,
2484                      chain = __tcf_get_next_chain(block, chain),
2485                      tcf_chain_put(chain_prev)) {
2486                 if (tca[TCA_CHAIN] &&
2487                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2488                         continue;
2489                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2490                                     index_start, &index)) {
2491                         tcf_chain_put(chain);
2492                         err = -EMSGSIZE;
2493                         break;
2494                 }
2495         }
2496
2497         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2498                 tcf_block_refcnt_put(block, true);
2499         cb->args[0] = index;
2500
2501 out:
2502         /* If we made no progress, the error (EMSGSIZE) is real */
2503         if (skb->len == 0 && err)
2504                 return err;
2505         return skb->len;
2506 }
2507
2508 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2509                               void *tmplt_priv, u32 chain_index,
2510                               struct net *net, struct sk_buff *skb,
2511                               struct tcf_block *block,
2512                               u32 portid, u32 seq, u16 flags, int event)
2513 {
2514         unsigned char *b = skb_tail_pointer(skb);
2515         const struct tcf_proto_ops *ops;
2516         struct nlmsghdr *nlh;
2517         struct tcmsg *tcm;
2518         void *priv;
2519
2520         ops = tmplt_ops;
2521         priv = tmplt_priv;
2522
2523         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2524         if (!nlh)
2525                 goto out_nlmsg_trim;
2526         tcm = nlmsg_data(nlh);
2527         tcm->tcm_family = AF_UNSPEC;
2528         tcm->tcm__pad1 = 0;
2529         tcm->tcm__pad2 = 0;
2530         tcm->tcm_handle = 0;
2531         if (block->q) {
2532                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2533                 tcm->tcm_parent = block->q->handle;
2534         } else {
2535                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2536                 tcm->tcm_block_index = block->index;
2537         }
2538
2539         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2540                 goto nla_put_failure;
2541
2542         if (ops) {
2543                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2544                         goto nla_put_failure;
2545                 if (ops->tmplt_dump(skb, net, priv) < 0)
2546                         goto nla_put_failure;
2547         }
2548
2549         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2550         return skb->len;
2551
2552 out_nlmsg_trim:
2553 nla_put_failure:
2554         nlmsg_trim(skb, b);
2555         return -EMSGSIZE;
2556 }
2557
2558 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2559                            u32 seq, u16 flags, int event, bool unicast)
2560 {
2561         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2562         struct tcf_block *block = chain->block;
2563         struct net *net = block->net;
2564         struct sk_buff *skb;
2565         int err = 0;
2566
2567         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2568         if (!skb)
2569                 return -ENOBUFS;
2570
2571         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2572                                chain->index, net, skb, block, portid,
2573                                seq, flags, event) <= 0) {
2574                 kfree_skb(skb);
2575                 return -EINVAL;
2576         }
2577
2578         if (unicast)
2579                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2580         else
2581                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2582                                      flags & NLM_F_ECHO);
2583
2584         if (err > 0)
2585                 err = 0;
2586         return err;
2587 }
2588
2589 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2590                                   void *tmplt_priv, u32 chain_index,
2591                                   struct tcf_block *block, struct sk_buff *oskb,
2592                                   u32 seq, u16 flags, bool unicast)
2593 {
2594         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2595         struct net *net = block->net;
2596         struct sk_buff *skb;
2597
2598         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2599         if (!skb)
2600                 return -ENOBUFS;
2601
2602         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2603                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2604                 kfree_skb(skb);
2605                 return -EINVAL;
2606         }
2607
2608         if (unicast)
2609                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2610
2611         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2612 }
2613
2614 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2615                               struct nlattr **tca,
2616                               struct netlink_ext_ack *extack)
2617 {
2618         const struct tcf_proto_ops *ops;
2619         void *tmplt_priv;
2620
2621         /* If kind is not set, the user did not specify a template. */
2622         if (!tca[TCA_KIND])
2623                 return 0;
2624
2625         ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2626         if (IS_ERR(ops))
2627                 return PTR_ERR(ops);
2628         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2629                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2630                 return -EOPNOTSUPP;
2631         }
2632
2633         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2634         if (IS_ERR(tmplt_priv)) {
2635                 module_put(ops->owner);
2636                 return PTR_ERR(tmplt_priv);
2637         }
2638         chain->tmplt_ops = ops;
2639         chain->tmplt_priv = tmplt_priv;
2640         return 0;
2641 }
2642
2643 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2644                                void *tmplt_priv)
2645 {
2646         /* If template ops are not set, there is no work to do for us. */
2647         if (!tmplt_ops)
2648                 return;
2649
2650         tmplt_ops->tmplt_destroy(tmplt_priv);
2651         module_put(tmplt_ops->owner);
2652 }
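
/*
 * The three template callbacks used above come from tcf_proto_ops. A
 * hedged sketch of their shape; every "sample" identifier is hypothetical
 * (cls_flower is the in-tree user of this interface):
 */
#if 0
static void *sample_tmplt_create(struct net *net, struct tcf_chain *chain,
				 struct nlattr **tca,
				 struct netlink_ext_ack *extack)
{
	/* Parse TCA_OPTIONS into classifier-private template state and
	 * return it, or an ERR_PTR() on failure.
	 */
	return sample_parse_template(net, tca, extack);
}

static void sample_tmplt_destroy(void *tmplt_priv)
{
	kfree(tmplt_priv);
}

static int sample_tmplt_dump(struct sk_buff *skb, struct net *net,
			     void *tmplt_priv)
{
	/* Mirror tmplt_create: dump the template back as netlink attrs. */
	return sample_dump_template(skb, tmplt_priv);
}
#endif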
2653
2654 /* Add/delete/get a chain */
2655
2656 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2657                         struct netlink_ext_ack *extack)
2658 {
2659         struct net *net = sock_net(skb->sk);
2660         struct nlattr *tca[TCA_MAX + 1];
2661         struct tcmsg *t;
2662         u32 parent;
2663         u32 chain_index;
2664         struct Qdisc *q = NULL;
2665         struct tcf_chain *chain = NULL;
2666         struct tcf_block *block;
2667         unsigned long cl;
2668         int err;
2669
2670         if (n->nlmsg_type != RTM_GETCHAIN &&
2671             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2672                 return -EPERM;
2673
2674 replay:
2675         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2676                                      rtm_tca_policy, extack);
2677         if (err < 0)
2678                 return err;
2679
2680         t = nlmsg_data(n);
2681         parent = t->tcm_parent;
2682         cl = 0;
2683
2684         block = tcf_block_find(net, &q, &parent, &cl,
2685                                t->tcm_ifindex, t->tcm_block_index, extack);
2686         if (IS_ERR(block))
2687                 return PTR_ERR(block);
2688
2689         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2690         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2691                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2692                 err = -EINVAL;
2693                 goto errout_block;
2694         }
2695
2696         mutex_lock(&block->lock);
2697         chain = tcf_chain_lookup(block, chain_index);
2698         if (n->nlmsg_type == RTM_NEWCHAIN) {
2699                 if (chain) {
2700                         if (tcf_chain_held_by_acts_only(chain)) {
2701                                 /* The chain exists only because there is
2702                                  * some action referencing it.
2703                                  */
2704                                 tcf_chain_hold(chain);
2705                         } else {
2706                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2707                                 err = -EEXIST;
2708                                 goto errout_block_locked;
2709                         }
2710                 } else {
2711                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2712                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2713                                 err = -ENOENT;
2714                                 goto errout_block_locked;
2715                         }
2716                         chain = tcf_chain_create(block, chain_index);
2717                         if (!chain) {
2718                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2719                                 err = -ENOMEM;
2720                                 goto errout_block_locked;
2721                         }
2722                 }
2723         } else {
2724                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2725                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2726                         err = -EINVAL;
2727                         goto errout_block_locked;
2728                 }
2729                 tcf_chain_hold(chain);
2730         }
2731
2732         if (n->nlmsg_type == RTM_NEWCHAIN) {
2733                 /* Modifying a chain requires holding the parent block lock. In
2734                  * case the chain was successfully added, take a reference to the
2735                  * chain. This ensures that an empty chain does not disappear at
2736                  * the end of this function.
2737                  */
2738                 tcf_chain_hold(chain);
2739                 chain->explicitly_created = true;
2740         }
2741         mutex_unlock(&block->lock);
2742
2743         switch (n->nlmsg_type) {
2744         case RTM_NEWCHAIN:
2745                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2746                 if (err) {
2747                         tcf_chain_put_explicitly_created(chain);
2748                         goto errout;
2749                 }
2750
2751                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2752                                 RTM_NEWCHAIN, false);
2753                 break;
2754         case RTM_DELCHAIN:
2755                 tfilter_notify_chain(net, skb, block, q, parent, n,
2756                                      chain, RTM_DELTFILTER, true);
2757                 /* Flush the chain first as the user requested chain removal. */
2758                 tcf_chain_flush(chain, true);
2759                 /* In case the chain was successfully deleted, put a reference
2760                  * to the chain previously taken during addition.
2761                  */
2762                 tcf_chain_put_explicitly_created(chain);
2763                 break;
2764         case RTM_GETCHAIN:
2765                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2766                                       n->nlmsg_seq, n->nlmsg_type, true);
2767                 if (err < 0)
2768                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2769                 break;
2770         default:
2771                 err = -EOPNOTSUPP;
2772                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2773                 goto errout;
2774         }
2775
2776 errout:
2777         tcf_chain_put(chain);
2778 errout_block:
2779         tcf_block_release(q, block, true);
2780         if (err == -EAGAIN)
2781                 /* Replay the request. */
2782                 goto replay;
2783         return err;
2784
2785 errout_block_locked:
2786         mutex_unlock(&block->lock);
2787         goto errout_block;
2788 }
2789
2790 /* called with RTNL */
2791 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2792 {
2793         struct net *net = sock_net(skb->sk);
2794         struct nlattr *tca[TCA_MAX + 1];
2795         struct Qdisc *q = NULL;
2796         struct tcf_block *block;
2797         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2798         struct tcf_chain *chain;
2799         long index_start;
2800         long index;
2801         u32 parent;
2802         int err;
2803
2804         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2805                 return skb->len;
2806
2807         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2808                                      rtm_tca_policy, cb->extack);
2809         if (err)
2810                 return err;
2811
2812         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2813                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2814                 if (!block)
2815                         goto out;
2816                 /* If we work with a block index, q is NULL and the parent
2817                  * value will never be used in the following code. The check
2818                  * in tc_chain_fill_node prevents it. However, the compiler
2819                  * cannot see that far ahead, so set parent to zero to silence
2820                  * the warning about parent being uninitialized.
2821                  */
2822                 parent = 0;
2823         } else {
2824                 const struct Qdisc_class_ops *cops;
2825                 struct net_device *dev;
2826                 unsigned long cl = 0;
2827
2828                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2829                 if (!dev)
2830                         return skb->len;
2831
2832                 parent = tcm->tcm_parent;
2833                 if (!parent) {
2834                         q = dev->qdisc;
2835                         parent = q->handle;
2836                 } else {
2837                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2838                 }
2839                 if (!q)
2840                         goto out;
2841                 cops = q->ops->cl_ops;
2842                 if (!cops)
2843                         goto out;
2844                 if (!cops->tcf_block)
2845                         goto out;
2846                 if (TC_H_MIN(tcm->tcm_parent)) {
2847                         cl = cops->find(q, tcm->tcm_parent);
2848                         if (cl == 0)
2849                                 goto out;
2850                 }
2851                 block = cops->tcf_block(q, cl, NULL);
2852                 if (!block)
2853                         goto out;
2854                 if (tcf_block_shared(block))
2855                         q = NULL;
2856         }
2857
2858         index_start = cb->args[0];
2859         index = 0;
2860
2861         mutex_lock(&block->lock);
2862         list_for_each_entry(chain, &block->chain_list, list) {
2863                 if ((tca[TCA_CHAIN] &&
2864                      nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2865                         continue;
2866                 if (index < index_start) {
2867                         index++;
2868                         continue;
2869                 }
2870                 if (tcf_chain_held_by_acts_only(chain))
2871                         continue;
2872                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2873                                          chain->index, net, skb, block,
2874                                          NETLINK_CB(cb->skb).portid,
2875                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
2876                                          RTM_NEWCHAIN);
2877                 if (err <= 0)
2878                         break;
2879                 index++;
2880         }
2881         mutex_unlock(&block->lock);
2882
2883         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2884                 tcf_block_refcnt_put(block, true);
2885         cb->args[0] = index;
2886
2887 out:
2888         /* If we made no progress, the error (EMSGSIZE) is real */
2889         if (skb->len == 0 && err)
2890                 return err;
2891         return skb->len;
2892 }
2893
2894 void tcf_exts_destroy(struct tcf_exts *exts)
2895 {
2896 #ifdef CONFIG_NET_CLS_ACT
2897         if (exts->actions) {
2898                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
2899                 kfree(exts->actions);
2900         }
2901         exts->nr_actions = 0;
2902 #endif
2903 }
2904 EXPORT_SYMBOL(tcf_exts_destroy);
2905
2906 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
2907                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
2908                       bool rtnl_held, struct netlink_ext_ack *extack)
2909 {
2910 #ifdef CONFIG_NET_CLS_ACT
2911         {
2912                 struct tc_action *act;
2913                 size_t attr_size = 0;
2914
2915                 if (exts->police && tb[exts->police]) {
2916                         act = tcf_action_init_1(net, tp, tb[exts->police],
2917                                                 rate_tlv, "police", ovr,
2918                                                 TCA_ACT_BIND, rtnl_held,
2919                                                 extack);
2920                         if (IS_ERR(act))
2921                                 return PTR_ERR(act);
2922
2923                         act->type = exts->type = TCA_OLD_COMPAT;
2924                         exts->actions[0] = act;
2925                         exts->nr_actions = 1;
2926                 } else if (exts->action && tb[exts->action]) {
2927                         int err;
2928
2929                         err = tcf_action_init(net, tp, tb[exts->action],
2930                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
2931                                               exts->actions, &attr_size,
2932                                               rtnl_held, extack);
2933                         if (err < 0)
2934                                 return err;
2935                         exts->nr_actions = err;
2936                 }
2937         }
2938 #else
2939         if ((exts->action && tb[exts->action]) ||
2940             (exts->police && tb[exts->police])) {
2941                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
2942                 return -EOPNOTSUPP;
2943         }
2944 #endif
2945
2946         return 0;
2947 }
2948 EXPORT_SYMBOL(tcf_exts_validate);
2949
2950 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
2951 {
2952 #ifdef CONFIG_NET_CLS_ACT
2953         struct tcf_exts old = *dst;
2954
2955         *dst = *src;
2956         tcf_exts_destroy(&old);
2957 #endif
2958 }
2959 EXPORT_SYMBOL(tcf_exts_change);
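
/*
 * tcf_exts_validate() and tcf_exts_change() are typically paired inside a
 * classifier's ->change() callback: validate the actions into a temporary
 * tcf_exts, then swap it into the live filter so the old actions are
 * destroyed outside the critical section. A hedged sketch; the filter
 * type and the TCA_SAMPLE_* attribute ids are hypothetical:
 */
#if 0
static int sample_set_exts(struct net *net, struct tcf_proto *tp,
			   struct sample_filter *f, struct nlattr **tb,
			   struct nlattr *est, bool ovr,
			   struct netlink_ext_ack *extack)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, net, TCA_SAMPLE_ACT, TCA_SAMPLE_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0) {
		tcf_exts_destroy(&e);
		return err;
	}
	tcf_exts_change(&f->exts, &e);
	return 0;
}
#endif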
2960
2961 #ifdef CONFIG_NET_CLS_ACT
2962 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
2963 {
2964         if (exts->nr_actions == 0)
2965                 return NULL;
2966         else
2967                 return exts->actions[0];
2968 }
2969 #endif
2970
2971 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
2972 {
2973 #ifdef CONFIG_NET_CLS_ACT
2974         struct nlattr *nest;
2975
2976         if (exts->action && tcf_exts_has_actions(exts)) {
2977                 /*
2978                  * Again for backward-compatible mode - we want
2979                  * to work with both old and new modes of entering
2980                  * tc data even if iproute2 is newer - jhs
2981                  */
2982                 if (exts->type != TCA_OLD_COMPAT) {
2983                         nest = nla_nest_start_noflag(skb, exts->action);
2984                         if (nest == NULL)
2985                                 goto nla_put_failure;
2986
2987                         if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
2988                                 goto nla_put_failure;
2989                         nla_nest_end(skb, nest);
2990                 } else if (exts->police) {
2991                         struct tc_action *act = tcf_exts_first_act(exts);
2992                         nest = nla_nest_start_noflag(skb, exts->police);
2993                         if (nest == NULL || !act)
2994                                 goto nla_put_failure;
2995                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
2996                                 goto nla_put_failure;
2997                         nla_nest_end(skb, nest);
2998                 }
2999         }
3000         return 0;
3001
3002 nla_put_failure:
3003         nla_nest_cancel(skb, nest);
3004         return -1;
3005 #else
3006         return 0;
3007 #endif
3008 }
3009 EXPORT_SYMBOL(tcf_exts_dump);
3010
3012 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3013 {
3014 #ifdef CONFIG_NET_CLS_ACT
3015         struct tc_action *a = tcf_exts_first_act(exts);

3016         if (a && tcf_action_copy_stats(skb, a, 1) < 0)
3017                 return -1;
3018 #endif
3019         return 0;
3020 }
3021 EXPORT_SYMBOL(tcf_exts_dump_stats);
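
/* Illustrative dump pattern (a sketch, not from this file): classifiers
 * emit their actions inside the options attribute via tcf_exts_dump()
 * and append action statistics with tcf_exts_dump_stats() after closing
 * the nest. 'n' is a hypothetical filter node; the exact nesting varies
 * per classifier:
 *
 *	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *	if (!nest)
 *		goto nla_put_failure;
 *	if (tcf_exts_dump(skb, &n->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &n->exts) < 0)
 *		goto nla_put_failure;
 */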
3022
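/* The block's offloadcnt tracks how many of its filters are in hardware,
 * while TCA_CLS_FLAGS_IN_HW marks an individual filter as offloaded. The
 * helpers below keep the two in sync: tcf_block_offload_inc() sets the
 * flag and bumps the counter only on the not-in-hw -> in-hw transition,
 * and tcf_block_offload_dec() undoes both once the filter's last
 * hardware reference is gone.
 */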
3023 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3024 {
3025         if (*flags & TCA_CLS_FLAGS_IN_HW)
3026                 return;
3027         *flags |= TCA_CLS_FLAGS_IN_HW;
3028         atomic_inc(&block->offloadcnt);
3029 }
3030
3031 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3032 {
3033         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3034                 return;
3035         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3036         atomic_dec(&block->offloadcnt);
3037 }
3038
3039 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3040                                       struct tcf_proto *tp, u32 *cnt,
3041                                       u32 *flags, u32 diff, bool add)
3042 {
3043         lockdep_assert_held(&block->cb_lock);
3044
3045         spin_lock(&tp->lock);
3046         if (add) {
3047                 if (!*cnt)
3048                         tcf_block_offload_inc(block, flags);
3049                 *cnt += diff;
3050         } else {
3051                 *cnt -= diff;
3052                 if (!*cnt)
3053                         tcf_block_offload_dec(block, flags);
3054         }
3055         spin_unlock(&tp->lock);
3056 }
3057
3058 static void
3059 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3060                          u32 *cnt, u32 *flags)
3061 {
3062         lockdep_assert_held(&block->cb_lock);
3063
3064         spin_lock(&tp->lock);
3065         tcf_block_offload_dec(block, flags);
3066         *cnt = 0;
3067         spin_unlock(&tp->lock);
3068 }
3069
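/* Return convention, implied by the callers below: a negative value is
 * the first callback error when err_stop is set; otherwise the number of
 * callbacks that accepted the command, which the tc_setup_cb_*()
 * wrappers use as the in-hardware count for the filter.
 */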
3070 static int
3071 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3072                    void *type_data, bool err_stop)
3073 {
3074         struct flow_block_cb *block_cb;
3075         int ok_count = 0;
3076         int err;
3077
3078         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3079                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3080                 if (err) {
3081                         if (err_stop)
3082                                 return err;
3083                 } else {
3084                         ok_count++;
3085                 }
3086         }
3087         return ok_count;
3088 }
3089
3090 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3091                      void *type_data, bool err_stop, bool rtnl_held)
3092 {
3093         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3094         int ok_count;
3095
3096 retry:
3097         if (take_rtnl)
3098                 rtnl_lock();
3099         down_read(&block->cb_lock);
3100         /* Need to obtain rtnl lock if block is bound to devs that require it.
3101          * In block bind code cb_lock is obtained while holding rtnl, so we must
3102          * obtain the locks in the same order here.
3103          */
3104         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3105                 up_read(&block->cb_lock);
3106                 take_rtnl = true;
3107                 goto retry;
3108         }
3109
3110         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3111
3112         up_read(&block->cb_lock);
3113         if (take_rtnl)
3114                 rtnl_unlock();
3115         return ok_count;
3116 }
3117 EXPORT_SYMBOL(tc_setup_cb_call);
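
/* Illustrative call (a sketch, not from this file): classifiers use
 * tc_setup_cb_call() for commands that do not change the offload count,
 * e.g. a flower stats request. 'f' is a hypothetical filter; real
 * callers also fill the common extack/chain fields via
 * tc_cls_common_offload_init():
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	cls_flower.command = FLOW_CLS_STATS;
 *	cls_flower.cookie = (unsigned long) f;
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *			 false, rtnl_held);
 */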
3118
3119 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3120  * successfully offloaded, increment the block offload counter. On failure, a
3121  * previously offloaded filter is considered to be intact and the offload
3122  * counter is not decremented.
3123  */
3124
3125 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3126                     enum tc_setup_type type, void *type_data, bool err_stop,
3127                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3128 {
3129         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3130         int ok_count;
3131
3132 retry:
3133         if (take_rtnl)
3134                 rtnl_lock();
3135         down_read(&block->cb_lock);
3136         /* Need to obtain rtnl lock if block is bound to devs that require it.
3137          * In block bind code cb_lock is obtained while holding rtnl, so we must
3138          * obtain the locks in the same order here.
3139          */
3140         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3141                 up_read(&block->cb_lock);
3142                 take_rtnl = true;
3143                 goto retry;
3144         }
3145
3146         /* Make sure all netdevs sharing this block are offload-capable. */
3147         if (block->nooffloaddevcnt && err_stop) {
3148                 ok_count = -EOPNOTSUPP;
3149                 goto err_unlock;
3150         }
3151
3152         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3153         if (ok_count < 0)
3154                 goto err_unlock;
3155
3156         if (tp->ops->hw_add)
3157                 tp->ops->hw_add(tp, type_data);
3158         if (ok_count > 0)
3159                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3160                                           ok_count, true);
3161 err_unlock:
3162         up_read(&block->cb_lock);
3163         if (take_rtnl)
3164                 rtnl_unlock();
3165         return ok_count < 0 ? ok_count : 0;
3166 }
3167 EXPORT_SYMBOL(tc_setup_cb_add);
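
/* Illustrative call (a sketch loosely following how flower installs a
 * filter; the names are assumptions): err_stop is driven by the skip_sw
 * flag, and the per-filter flags/in_hw_count are updated on success.
 *
 *	skip_sw = tc_skip_sw(f->flags);
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count,
 *			      rtnl_held);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;
 *
 * The last check fails the insert when hardware offload was mandatory
 * (skip_sw) but no callback accepted the filter.
 */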
3168
3169 /* Destructive filter replace. If a filter that wasn't already in hardware is
3170  * successfully offloaded, increment the block offload counter. On failure, the
3171  * previously offloaded filter is considered to be destroyed and the offload
3172  * counter is decremented.
3173  */
3174
3175 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3176                         enum tc_setup_type type, void *type_data, bool err_stop,
3177                         u32 *old_flags, unsigned int *old_in_hw_count,
3178                         u32 *new_flags, unsigned int *new_in_hw_count,
3179                         bool rtnl_held)
3180 {
3181         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3182         int ok_count;
3183
3184 retry:
3185         if (take_rtnl)
3186                 rtnl_lock();
3187         down_read(&block->cb_lock);
3188         /* Need to obtain rtnl lock if block is bound to devs that require it.
3189          * In block bind code cb_lock is obtained while holding rtnl, so we must
3190          * obtain the locks in the same order here.
3191          */
3192         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3193                 up_read(&block->cb_lock);
3194                 take_rtnl = true;
3195                 goto retry;
3196         }
3197
3198         /* Make sure all netdevs sharing this block are offload-capable. */
3199         if (block->nooffloaddevcnt && err_stop) {
3200                 ok_count = -EOPNOTSUPP;
3201                 goto err_unlock;
3202         }
3203
3204         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3205         if (tp->ops->hw_del)
3206                 tp->ops->hw_del(tp, type_data);
3207
3208         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3209         if (ok_count < 0)
3210                 goto err_unlock;
3211
3212         if (tp->ops->hw_add)
3213                 tp->ops->hw_add(tp, type_data);
3214         if (ok_count > 0)
3215                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3216                                           new_flags, ok_count, true);
3217 err_unlock:
3218         up_read(&block->cb_lock);
3219         if (take_rtnl)
3220                 rtnl_unlock();
3221         return ok_count < 0 ? ok_count : 0;
3222 }
3223 EXPORT_SYMBOL(tc_setup_cb_replace);
3224
3225 /* Destroy the filter and decrement the block offload counter if the filter
3226  * was previously offloaded.
3227  */
3228
3229 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3230                         enum tc_setup_type type, void *type_data, bool err_stop,
3231                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3232 {
3233         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3234         int ok_count;
3235
3236 retry:
3237         if (take_rtnl)
3238                 rtnl_lock();
3239         down_read(&block->cb_lock);
3240         /* Need to obtain rtnl lock if block is bound to devs that require it.
3241          * In block bind code cb_lock is obtained while holding rtnl, so we must
3242          * obtain the locks in the same order here.
3243          */
3244         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3245                 up_read(&block->cb_lock);
3246                 take_rtnl = true;
3247                 goto retry;
3248         }
3249
3250         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3251
3252         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3253         if (tp->ops->hw_del)
3254                 tp->ops->hw_del(tp, type_data);
3255
3256         up_read(&block->cb_lock);
3257         if (take_rtnl)
3258                 rtnl_unlock();
3259         return ok_count < 0 ? ok_count : 0;
3260 }
3261 EXPORT_SYMBOL(tc_setup_cb_destroy);
3262
3263 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3264                           bool add, flow_setup_cb_t *cb,
3265                           enum tc_setup_type type, void *type_data,
3266                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3267 {
3268         int err = cb(type, type_data, cb_priv);
3269
3270         if (err) {
3271                 if (add && tc_skip_sw(*flags))
3272                         return err;
3273         } else {
3274                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3275                                           add);
3276         }
3277
3278         return 0;
3279 }
3280 EXPORT_SYMBOL(tc_setup_cb_reoffload);
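
/* Illustrative context (a sketch, not from this file): reoffload is used
 * when a callback is registered on, or removed from, a block that
 * already holds filters. The block code walks each classifier's filters
 * via tp->ops->reoffload(), and the classifier replays every filter to
 * the single new callback, roughly:
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
 *				   extack);
 *	cls_flower.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
 *	err = tc_setup_cb_reoffload(block, tp, add, cb,
 *				    TC_SETUP_CLSFLOWER, &cls_flower,
 *				    cb_priv, &f->flags, &f->in_hw_count);
 */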
3281
3282 void tc_cleanup_flow_action(struct flow_action *flow_action)
3283 {
3284         struct flow_action_entry *entry;
3285         int i;
3286
3287         flow_action_for_each(i, entry, flow_action)
3288                 if (entry->destructor)
3289                         entry->destructor(entry->destructor_priv);
3290 }
3291 EXPORT_SYMBOL(tc_cleanup_flow_action);
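
/* The helpers below implement the destructor contract used above: when
 * translating an action pins a resource (a net_device reference for
 * mirred, a copied ip_tunnel_info for tunnel encap, a psample group),
 * they record a destructor and its priv in the entry so that
 * tc_cleanup_flow_action() can release the resource on a translation
 * failure or when the offload is torn down.
 */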
3292
3293 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3294                                const struct tc_action *act)
3295 {
3296 #ifdef CONFIG_NET_CLS_ACT
3297         entry->dev = act->ops->get_dev(act, &entry->destructor);
3298         if (!entry->dev)
3299                 return;
3300         entry->destructor_priv = entry->dev;
3301 #endif
3302 }
3303
3304 static void tcf_tunnel_encap_put_tunnel(void *priv)
3305 {
3306         struct ip_tunnel_info *tunnel = priv;
3307
3308         kfree(tunnel);
3309 }
3310
3311 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3312                                        const struct tc_action *act)
3313 {
3314         entry->tunnel = tcf_tunnel_info_copy(act);
3315         if (!entry->tunnel)
3316                 return -ENOMEM;
3317         entry->destructor = tcf_tunnel_encap_put_tunnel;
3318         entry->destructor_priv = entry->tunnel;
3319         return 0;
3320 }
3321
3322 static void tcf_sample_get_group(struct flow_action_entry *entry,
3323                                  const struct tc_action *act)
3324 {
3325 #ifdef CONFIG_NET_CLS_ACT
3326         entry->sample.psample_group =
3327                 act->ops->get_psample_group(act, &entry->destructor);
3328         entry->destructor_priv = entry->sample.psample_group;
3329 #endif
3330 }
3331
3332 int tc_setup_flow_action(struct flow_action *flow_action,
3333                          const struct tcf_exts *exts, bool rtnl_held)
3334 {
3335         const struct tc_action *act;
3336         int i, j, k, err = 0;
3337
3338         if (!exts)
3339                 return 0;
3340
3341         if (!rtnl_held)
3342                 rtnl_lock();
3343
3344         j = 0;
3345         tcf_exts_for_each_action(i, act, exts) {
3346                 struct flow_action_entry *entry;
3347
3348                 entry = &flow_action->entries[j];
3349                 if (is_tcf_gact_ok(act)) {
3350                         entry->id = FLOW_ACTION_ACCEPT;
3351                 } else if (is_tcf_gact_shot(act)) {
3352                         entry->id = FLOW_ACTION_DROP;
3353                 } else if (is_tcf_gact_trap(act)) {
3354                         entry->id = FLOW_ACTION_TRAP;
3355                 } else if (is_tcf_gact_goto_chain(act)) {
3356                         entry->id = FLOW_ACTION_GOTO;
3357                         entry->chain_index = tcf_gact_goto_chain_index(act);
3358                 } else if (is_tcf_mirred_egress_redirect(act)) {
3359                         entry->id = FLOW_ACTION_REDIRECT;
3360                         tcf_mirred_get_dev(entry, act);
3361                 } else if (is_tcf_mirred_egress_mirror(act)) {
3362                         entry->id = FLOW_ACTION_MIRRED;
3363                         tcf_mirred_get_dev(entry, act);
3364                 } else if (is_tcf_mirred_ingress_redirect(act)) {
3365                         entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3366                         tcf_mirred_get_dev(entry, act);
3367                 } else if (is_tcf_mirred_ingress_mirror(act)) {
3368                         entry->id = FLOW_ACTION_MIRRED_INGRESS;
3369                         tcf_mirred_get_dev(entry, act);
3370                 } else if (is_tcf_vlan(act)) {
3371                         switch (tcf_vlan_action(act)) {
3372                         case TCA_VLAN_ACT_PUSH:
3373                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3374                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3375                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3376                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3377                                 break;
3378                         case TCA_VLAN_ACT_POP:
3379                                 entry->id = FLOW_ACTION_VLAN_POP;
3380                                 break;
3381                         case TCA_VLAN_ACT_MODIFY:
3382                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3383                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3384                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3385                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3386                                 break;
3387                         default:
3388                                 err = -EOPNOTSUPP;
3389                                 goto err_out;
3390                         }
3391                 } else if (is_tcf_tunnel_set(act)) {
3392                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3393                         err = tcf_tunnel_encap_get_tunnel(entry, act);
3394                         if (err)
3395                                 goto err_out;
3396                 } else if (is_tcf_tunnel_release(act)) {
3397                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3398                 } else if (is_tcf_pedit(act)) {
3399                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3400                                 switch (tcf_pedit_cmd(act, k)) {
3401                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3402                                         entry->id = FLOW_ACTION_MANGLE;
3403                                         break;
3404                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3405                                         entry->id = FLOW_ACTION_ADD;
3406                                         break;
3407                                 default:
3408                                         err = -EOPNOTSUPP;
3409                                         goto err_out;
3410                                 }
3411                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3412                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3413                                 entry->mangle.val = tcf_pedit_val(act, k);
3414                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3415                                 entry = &flow_action->entries[++j];
3416                         }
3417                 } else if (is_tcf_csum(act)) {
3418                         entry->id = FLOW_ACTION_CSUM;
3419                         entry->csum_flags = tcf_csum_update_flags(act);
3420                 } else if (is_tcf_skbedit_mark(act)) {
3421                         entry->id = FLOW_ACTION_MARK;
3422                         entry->mark = tcf_skbedit_mark(act);
3423                 } else if (is_tcf_sample(act)) {
3424                         entry->id = FLOW_ACTION_SAMPLE;
3425                         entry->sample.trunc_size = tcf_sample_trunc_size(act);
3426                         entry->sample.truncate = tcf_sample_truncate(act);
3427                         entry->sample.rate = tcf_sample_rate(act);
3428                         tcf_sample_get_group(entry, act);
3429                 } else if (is_tcf_police(act)) {
3430                         entry->id = FLOW_ACTION_POLICE;
3431                         entry->police.burst = tcf_police_tcfp_burst(act);
3432                         entry->police.rate_bytes_ps =
3433                                 tcf_police_rate_bytes_ps(act);
3434                 } else if (is_tcf_ct(act)) {
3435                         entry->id = FLOW_ACTION_CT;
3436                         entry->ct.action = tcf_ct_action(act);
3437                         entry->ct.zone = tcf_ct_zone(act);
3438                 } else if (is_tcf_mpls(act)) {
3439                         switch (tcf_mpls_action(act)) {
3440                         case TCA_MPLS_ACT_PUSH:
3441                                 entry->id = FLOW_ACTION_MPLS_PUSH;
3442                                 entry->mpls_push.proto = tcf_mpls_proto(act);
3443                                 entry->mpls_push.label = tcf_mpls_label(act);
3444                                 entry->mpls_push.tc = tcf_mpls_tc(act);
3445                                 entry->mpls_push.bos = tcf_mpls_bos(act);
3446                                 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3447                                 break;
3448                         case TCA_MPLS_ACT_POP:
3449                                 entry->id = FLOW_ACTION_MPLS_POP;
3450                                 entry->mpls_pop.proto = tcf_mpls_proto(act);
3451                                 break;
3452                         case TCA_MPLS_ACT_MODIFY:
3453                                 entry->id = FLOW_ACTION_MPLS_MANGLE;
3454                                 entry->mpls_mangle.label = tcf_mpls_label(act);
3455                                 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3456                                 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3457                                 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3458                                 break;
3459                         default:
                                     err = -EOPNOTSUPP;
3460                                 goto err_out;
3461                         }
3462                 } else if (is_tcf_skbedit_ptype(act)) {
3463                         entry->id = FLOW_ACTION_PTYPE;
3464                         entry->ptype = tcf_skbedit_ptype(act);
3465                 } else {
3466                         err = -EOPNOTSUPP;
3467                         goto err_out;
3468                 }
3469
3470                 if (!is_tcf_pedit(act))
3471                         j++;
3472         }
3473
3474 err_out:
3475         if (!rtnl_held)
3476                 rtnl_unlock();
3477
3478         if (err)
3479                 tc_cleanup_flow_action(flow_action);
3480
3481         return err;
3482 }
3483 EXPORT_SYMBOL(tc_setup_flow_action);
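
/* Illustrative consumer (a sketch of the driver side, not from this
 * file): offload callbacks walk the translated entries with
 * flow_action_for_each() and dispatch on entry->id; 'rule' is a
 * hypothetical struct flow_rule received in the offload request:
 *
 *	const struct flow_action_entry *entry;
 *	int i;
 *
 *	flow_action_for_each(i, entry, &rule->action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			// program a drop rule
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			// use entry->dev as the target port
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */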
3484
3485 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3486 {
3487         unsigned int num_acts = 0;
3488         struct tc_action *act;
3489         int i;
3490
3491         tcf_exts_for_each_action(i, act, exts) {
3492                 if (is_tcf_pedit(act))
3493                         num_acts += tcf_pedit_nkeys(act);
3494                 else
3495                         num_acts++;
3496         }
3497         return num_acts;
3498 }
3499 EXPORT_SYMBOL(tcf_exts_num_actions);
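
/* Illustrative pairing (a sketch, not from this file): the caller sizes
 * the flow_rule with tcf_exts_num_actions(), which counts each pedit key
 * as its own entry, matching the expansion in tc_setup_flow_action().
 * 'f' is a hypothetical filter:
 *
 *	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!cls_flower.rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
 *				   rtnl_held);
 *	if (err) {
 *		kfree(cls_flower.rule);
 *		return err;
 *	}
 */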
3500
3501 static __net_init int tcf_net_init(struct net *net)
3502 {
3503         struct tcf_net *tn = net_generic(net, tcf_net_id);
3504
3505         spin_lock_init(&tn->idr_lock);
3506         idr_init(&tn->idr);
3507         return 0;
3508 }
3509
3510 static void __net_exit tcf_net_exit(struct net *net)
3511 {
3512         struct tcf_net *tn = net_generic(net, tcf_net_id);
3513
3514         idr_destroy(&tn->idr);
3515 }
3516
3517 static struct pernet_operations tcf_net_ops = {
3518         .init = tcf_net_init,
3519         .exit = tcf_net_exit,
3520         .id   = &tcf_net_id,
3521         .size = sizeof(struct tcf_net),
3522 };
3523
3524 static struct flow_indr_block_ing_entry block_ing_entry = {
3525         .cb = tc_indr_block_get_and_ing_cmd,
3526         .list = LIST_HEAD_INIT(block_ing_entry.list),
3527 };
3528
3529 static int __init tc_filter_init(void)
3530 {
3531         int err;
3532
3533         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3534         if (!tc_filter_wq)
3535                 return -ENOMEM;
3536
3537         err = register_pernet_subsys(&tcf_net_ops);
3538         if (err)
3539                 goto err_register_pernet_subsys;
3540
3541         flow_indr_add_block_ing_cb(&block_ing_entry);
3542
3543         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3544                       RTNL_FLAG_DOIT_UNLOCKED);
3545         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3546                       RTNL_FLAG_DOIT_UNLOCKED);
3547         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3548                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3549         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3550         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3551         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3552                       tc_dump_chain, 0);
3553
3554         return 0;
3555
3556 err_register_pernet_subsys:
3557         destroy_workqueue(tc_filter_wq);
3558         return err;
3559 }
3560
3561 subsys_initcall(tc_filter_init);