net: flow_offload: fix flow_indr_dev_unregister path
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c  Packet classifier API.
4  *
5  * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/rhashtable.h>
24 #include <linux/jhash.h>
25 #include <linux/rculist.h>
26 #include <net/net_namespace.h>
27 #include <net/sock.h>
28 #include <net/netlink.h>
29 #include <net/pkt_sched.h>
30 #include <net/pkt_cls.h>
31 #include <net/tc_act/tc_pedit.h>
32 #include <net/tc_act/tc_mirred.h>
33 #include <net/tc_act/tc_vlan.h>
34 #include <net/tc_act/tc_tunnel_key.h>
35 #include <net/tc_act/tc_csum.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_police.h>
38 #include <net/tc_act/tc_sample.h>
39 #include <net/tc_act/tc_skbedit.h>
40 #include <net/tc_act/tc_ct.h>
41 #include <net/tc_act/tc_mpls.h>
42 #include <net/tc_act/tc_gate.h>
43 #include <net/flow_offload.h>
44
45 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
46
47 /* The list of all installed classifier types */
48 static LIST_HEAD(tcf_proto_base);
49
50 /* Protects the list of registered TC modules. It is a pure SMP lock. */
51 static DEFINE_RWLOCK(cls_mod_lock);
52
53 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
54 {
55         return jhash_3words(tp->chain->index, tp->prio,
56                             (__force __u32)tp->protocol, 0);
57 }
58
59 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
60                                         struct tcf_proto *tp)
61 {
62         struct tcf_block *block = chain->block;
63
64         mutex_lock(&block->proto_destroy_lock);
65         hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
66                      destroy_obj_hashfn(tp));
67         mutex_unlock(&block->proto_destroy_lock);
68 }
69
70 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
71                           const struct tcf_proto *tp2)
72 {
73         return tp1->chain->index == tp2->chain->index &&
74                tp1->prio == tp2->prio &&
75                tp1->protocol == tp2->protocol;
76 }
77
78 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
79                                         struct tcf_proto *tp)
80 {
81         u32 hash = destroy_obj_hashfn(tp);
82         struct tcf_proto *iter;
83         bool found = false;
84
85         rcu_read_lock();
86         hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
87                                    destroy_ht_node, hash) {
88                 if (tcf_proto_cmp(tp, iter)) {
89                         found = true;
90                         break;
91                 }
92         }
93         rcu_read_unlock();
94
95         return found;
96 }
97
98 static void
99 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
100 {
101         struct tcf_block *block = chain->block;
102
103         mutex_lock(&block->proto_destroy_lock);
104         if (hash_hashed(&tp->destroy_ht_node))
105                 hash_del_rcu(&tp->destroy_ht_node);
106         mutex_unlock(&block->proto_destroy_lock);
107 }
108
109 /* Find classifier type by string name */
110
111 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
112 {
113         const struct tcf_proto_ops *t, *res = NULL;
114
115         if (kind) {
116                 read_lock(&cls_mod_lock);
117                 list_for_each_entry(t, &tcf_proto_base, head) {
118                         if (strcmp(kind, t->kind) == 0) {
119                                 if (try_module_get(t->owner))
120                                         res = t;
121                                 break;
122                         }
123                 }
124                 read_unlock(&cls_mod_lock);
125         }
126         return res;
127 }
128
129 static const struct tcf_proto_ops *
130 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
131                      struct netlink_ext_ack *extack)
132 {
133         const struct tcf_proto_ops *ops;
134
135         ops = __tcf_proto_lookup_ops(kind);
136         if (ops)
137                 return ops;
138 #ifdef CONFIG_MODULES
139         if (rtnl_held)
140                 rtnl_unlock();
141         request_module("cls_%s", kind);
142         if (rtnl_held)
143                 rtnl_lock();
144         ops = __tcf_proto_lookup_ops(kind);
145         /* We dropped the RTNL semaphore in order to perform
146          * the module load. So, even if we succeeded in loading
147          * the module, we have to replay the request. We indicate
148          * this using -EAGAIN.
149          */
150         if (ops) {
151                 module_put(ops->owner);
152                 return ERR_PTR(-EAGAIN);
153         }
154 #endif
155         NL_SET_ERR_MSG(extack, "TC classifier not found");
156         return ERR_PTR(-ENOENT);
157 }
158
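/* Hedged sketch (not part of this excerpt): how a request handler consumes
 * the -EAGAIN produced above, modelled on the replay pattern used by the
 * filter request handlers further down in cls_api.c. example_do_request()
 * and example_handle() are hypothetical names.
 */
static int example_do_request(struct net *net, struct sk_buff *skb,
                              struct nlmsghdr *n,
                              struct netlink_ext_ack *extack);

static int example_handle(struct net *net, struct sk_buff *skb,
                          struct nlmsghdr *n, struct netlink_ext_ack *extack)
{
        int err;

replay:
        err = example_do_request(net, skb, n, extack);
        if (err == -EAGAIN)
                /* RTNL was dropped for request_module(); re-parse and
                 * re-execute the whole request from scratch.
                 */
                goto replay;
        return err;
}
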
159 /* Register (unregister) a new classifier type */
160
161 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
162 {
163         struct tcf_proto_ops *t;
164         int rc = -EEXIST;
165
166         write_lock(&cls_mod_lock);
167         list_for_each_entry(t, &tcf_proto_base, head)
168                 if (!strcmp(ops->kind, t->kind))
169                         goto out;
170
171         list_add_tail(&ops->head, &tcf_proto_base);
172         rc = 0;
173 out:
174         write_unlock(&cls_mod_lock);
175         return rc;
176 }
177 EXPORT_SYMBOL(register_tcf_proto_ops);
178
179 static struct workqueue_struct *tc_filter_wq;
180
181 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
182 {
183         struct tcf_proto_ops *t;
184         int rc = -ENOENT;
185
186         /* Wait for outstanding call_rcu()s, if any, from a
187          * tcf_proto_ops's destroy() handler.
188          */
189         rcu_barrier();
190         flush_workqueue(tc_filter_wq);
191
192         write_lock(&cls_mod_lock);
193         list_for_each_entry(t, &tcf_proto_base, head) {
194                 if (t == ops) {
195                         list_del(&t->head);
196                         rc = 0;
197                         break;
198                 }
199         }
200         write_unlock(&cls_mod_lock);
201         return rc;
202 }
203 EXPORT_SYMBOL(unregister_tcf_proto_ops);
204
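/* Hedged sketch (not part of this excerpt): the canonical way a classifier
 * module pairs the register/unregister helpers above, modelled on in-tree
 * classifiers such as cls_basic. All "example_*" names are hypothetical.
 */
static struct tcf_proto_ops example_cls_ops __read_mostly = {
        .kind   = "example",
        .owner  = THIS_MODULE,
        /* .classify, .init, .destroy, ... per the tcf_proto_ops contract;
         * rtnl-unlocked classifiers additionally set
         * .flags = TCF_PROTO_OPS_DOIT_UNLOCKED.
         */
};

static int __init example_cls_init(void)
{
        return register_tcf_proto_ops(&example_cls_ops);
}

static void __exit example_cls_exit(void)
{
        unregister_tcf_proto_ops(&example_cls_ops);
}

module_init(example_cls_init);
module_exit(example_cls_exit);
MODULE_LICENSE("GPL");
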
205 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
206 {
207         INIT_RCU_WORK(rwork, func);
208         return queue_rcu_work(tc_filter_wq, rwork);
209 }
210 EXPORT_SYMBOL(tcf_queue_work);
211
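/* Hedged sketch (not part of this excerpt): the usual caller pattern for
 * tcf_queue_work(). A classifier embeds a struct rcu_work in each filter
 * and defers the actual free until an RCU grace period has elapsed; the
 * work then runs on tc_filter_wq. Names are hypothetical.
 */
struct example_filter {
        struct rcu_work rwork;
        /* ... classifier-private state ... */
};

static void example_filter_free_work(struct work_struct *work)
{
        struct example_filter *f = container_of(to_rcu_work(work),
                                                struct example_filter, rwork);

        kfree(f);
}

/* At delete time, after unlinking the filter from all lookup structures:
 *
 *        tcf_queue_work(&f->rwork, example_filter_free_work);
 */
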
212 /* Select a new prio value from the range managed by the kernel. */
213
214 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
215 {
216         u32 first = TC_H_MAKE(0xC0000000U, 0U);
217
218         if (tp)
219                 first = tp->prio - 1;
220
221         return TC_H_MAJ(first);
222 }
223
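/* Worked example (illustrative): tp->prio keeps the priority in the upper
 * 16 bits (TC_H_MAJ form). With no filters installed the kernel starts at
 * TC_H_MAJ(0xC0000000) == 0xC0000000; if the current head has prio
 * 0xC0000000, the next auto-selected value is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, i.e. one major step lower.
 */
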
224 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
225 {
226         if (kind)
227                 return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
228         memset(name, 0, IFNAMSIZ);
229         return false;
230 }
231
232 static bool tcf_proto_is_unlocked(const char *kind)
233 {
234         const struct tcf_proto_ops *ops;
235         bool ret;
236
237         if (strlen(kind) == 0)
238                 return false;
239
240         ops = tcf_proto_lookup_ops(kind, false, NULL);
241         /* On error return false to take rtnl lock. Proto lookup/create
242          * functions will perform lookup again and properly handle errors.
243          */
244         if (IS_ERR(ops))
245                 return false;
246
247         ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
248         module_put(ops->owner);
249         return ret;
250 }
251
252 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
253                                           u32 prio, struct tcf_chain *chain,
254                                           bool rtnl_held,
255                                           struct netlink_ext_ack *extack)
256 {
257         struct tcf_proto *tp;
258         int err;
259
260         tp = kzalloc(sizeof(*tp), GFP_KERNEL);
261         if (!tp)
262                 return ERR_PTR(-ENOBUFS);
263
264         tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
265         if (IS_ERR(tp->ops)) {
266                 err = PTR_ERR(tp->ops);
267                 goto errout;
268         }
269         tp->classify = tp->ops->classify;
270         tp->protocol = protocol;
271         tp->prio = prio;
272         tp->chain = chain;
273         spin_lock_init(&tp->lock);
274         refcount_set(&tp->refcnt, 1);
275
276         err = tp->ops->init(tp);
277         if (err) {
278                 module_put(tp->ops->owner);
279                 goto errout;
280         }
281         return tp;
282
283 errout:
284         kfree(tp);
285         return ERR_PTR(err);
286 }
287
288 static void tcf_proto_get(struct tcf_proto *tp)
289 {
290         refcount_inc(&tp->refcnt);
291 }
292
293 static void tcf_chain_put(struct tcf_chain *chain);
294
295 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
296                               bool sig_destroy, struct netlink_ext_ack *extack)
297 {
298         tp->ops->destroy(tp, rtnl_held, extack);
299         if (sig_destroy)
300                 tcf_proto_signal_destroyed(tp->chain, tp);
301         tcf_chain_put(tp->chain);
302         module_put(tp->ops->owner);
303         kfree_rcu(tp, rcu);
304 }
305
306 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
307                           struct netlink_ext_ack *extack)
308 {
309         if (refcount_dec_and_test(&tp->refcnt))
310                 tcf_proto_destroy(tp, rtnl_held, true, extack);
311 }
312
313 static bool tcf_proto_check_delete(struct tcf_proto *tp)
314 {
315         if (tp->ops->delete_empty)
316                 return tp->ops->delete_empty(tp);
317
318         tp->deleting = true;
319         return tp->deleting;
320 }
321
322 static void tcf_proto_mark_delete(struct tcf_proto *tp)
323 {
324         spin_lock(&tp->lock);
325         tp->deleting = true;
326         spin_unlock(&tp->lock);
327 }
328
329 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
330 {
331         bool deleting;
332
333         spin_lock(&tp->lock);
334         deleting = tp->deleting;
335         spin_unlock(&tp->lock);
336
337         return deleting;
338 }
339
340 #define ASSERT_BLOCK_LOCKED(block)                                      \
341         lockdep_assert_held(&(block)->lock)
342
343 struct tcf_filter_chain_list_item {
344         struct list_head list;
345         tcf_chain_head_change_t *chain_head_change;
346         void *chain_head_change_priv;
347 };
348
349 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
350                                           u32 chain_index)
351 {
352         struct tcf_chain *chain;
353
354         ASSERT_BLOCK_LOCKED(block);
355
356         chain = kzalloc(sizeof(*chain), GFP_KERNEL);
357         if (!chain)
358                 return NULL;
359         list_add_tail_rcu(&chain->list, &block->chain_list);
360         mutex_init(&chain->filter_chain_lock);
361         chain->block = block;
362         chain->index = chain_index;
363         chain->refcnt = 1;
364         if (!chain->index)
365                 block->chain0.chain = chain;
366         return chain;
367 }
368
369 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
370                                        struct tcf_proto *tp_head)
371 {
372         if (item->chain_head_change)
373                 item->chain_head_change(tp_head, item->chain_head_change_priv);
374 }
375
376 static void tcf_chain0_head_change(struct tcf_chain *chain,
377                                    struct tcf_proto *tp_head)
378 {
379         struct tcf_filter_chain_list_item *item;
380         struct tcf_block *block = chain->block;
381
382         if (chain->index)
383                 return;
384
385         mutex_lock(&block->lock);
386         list_for_each_entry(item, &block->chain0.filter_chain_list, list)
387                 tcf_chain_head_change_item(item, tp_head);
388         mutex_unlock(&block->lock);
389 }
390
391 /* Returns true if block can be safely freed. */
392
393 static bool tcf_chain_detach(struct tcf_chain *chain)
394 {
395         struct tcf_block *block = chain->block;
396
397         ASSERT_BLOCK_LOCKED(block);
398
399         list_del_rcu(&chain->list);
400         if (!chain->index)
401                 block->chain0.chain = NULL;
402
403         if (list_empty(&block->chain_list) &&
404             refcount_read(&block->refcnt) == 0)
405                 return true;
406
407         return false;
408 }
409
410 static void tcf_block_destroy(struct tcf_block *block)
411 {
412         mutex_destroy(&block->lock);
413         mutex_destroy(&block->proto_destroy_lock);
414         kfree_rcu(block, rcu);
415 }
416
417 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
418 {
419         struct tcf_block *block = chain->block;
420
421         mutex_destroy(&chain->filter_chain_lock);
422         kfree_rcu(chain, rcu);
423         if (free_block)
424                 tcf_block_destroy(block);
425 }
426
427 static void tcf_chain_hold(struct tcf_chain *chain)
428 {
429         ASSERT_BLOCK_LOCKED(chain->block);
430
431         ++chain->refcnt;
432 }
433
434 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
435 {
436         ASSERT_BLOCK_LOCKED(chain->block);
437
438         /* In case all the references are action references, this
439          * chain should not be shown to the user.
440          */
441         return chain->refcnt == chain->action_refcnt;
442 }
443
444 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
445                                           u32 chain_index)
446 {
447         struct tcf_chain *chain;
448
449         ASSERT_BLOCK_LOCKED(block);
450
451         list_for_each_entry(chain, &block->chain_list, list) {
452                 if (chain->index == chain_index)
453                         return chain;
454         }
455         return NULL;
456 }
457
458 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
459 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
460                                               u32 chain_index)
461 {
462         struct tcf_chain *chain;
463
464         list_for_each_entry_rcu(chain, &block->chain_list, list) {
465                 if (chain->index == chain_index)
466                         return chain;
467         }
468         return NULL;
469 }
470 #endif
471
472 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
473                            u32 seq, u16 flags, int event, bool unicast);
474
475 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
476                                          u32 chain_index, bool create,
477                                          bool by_act)
478 {
479         struct tcf_chain *chain = NULL;
480         bool is_first_reference;
481
482         mutex_lock(&block->lock);
483         chain = tcf_chain_lookup(block, chain_index);
484         if (chain) {
485                 tcf_chain_hold(chain);
486         } else {
487                 if (!create)
488                         goto errout;
489                 chain = tcf_chain_create(block, chain_index);
490                 if (!chain)
491                         goto errout;
492         }
493
494         if (by_act)
495                 ++chain->action_refcnt;
496         is_first_reference = chain->refcnt - chain->action_refcnt == 1;
497         mutex_unlock(&block->lock);
498
499         /* Send a notification only when we get the first
500          * non-action reference. Until then, the chain acts only as
501          * a placeholder for actions pointing to it, and the user
502          * ought not to know about it.
503          */
504         if (is_first_reference && !by_act)
505                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
506                                 RTM_NEWCHAIN, false);
507
508         return chain;
509
510 errout:
511         mutex_unlock(&block->lock);
512         return chain;
513 }
514
515 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
516                                        bool create)
517 {
518         return __tcf_chain_get(block, chain_index, create, false);
519 }
520
521 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
522 {
523         return __tcf_chain_get(block, chain_index, true, true);
524 }
525 EXPORT_SYMBOL(tcf_chain_get_by_act);
526
527 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
528                                void *tmplt_priv);
529 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
530                                   void *tmplt_priv, u32 chain_index,
531                                   struct tcf_block *block, struct sk_buff *oskb,
532                                   u32 seq, u16 flags, bool unicast);
533
534 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
535                             bool explicitly_created)
536 {
537         struct tcf_block *block = chain->block;
538         const struct tcf_proto_ops *tmplt_ops;
539         bool free_block = false;
540         unsigned int refcnt;
541         void *tmplt_priv;
542
543         mutex_lock(&block->lock);
544         if (explicitly_created) {
545                 if (!chain->explicitly_created) {
546                         mutex_unlock(&block->lock);
547                         return;
548                 }
549                 chain->explicitly_created = false;
550         }
551
552         if (by_act)
553                 chain->action_refcnt--;
554
555         /* tc_chain_notify_delete can't be called while holding the block
556          * lock. However, once the block is unlocked the chain can change
557          * concurrently, so save these values to temporary variables.
558          */
559         refcnt = --chain->refcnt;
560         tmplt_ops = chain->tmplt_ops;
561         tmplt_priv = chain->tmplt_priv;
562
563         /* The last dropped non-action reference will trigger notification. */
564         if (refcnt - chain->action_refcnt == 0 && !by_act) {
565                 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
566                                        block, NULL, 0, 0, false);
567                 /* Last reference to chain, no need to lock. */
568                 chain->flushing = false;
569         }
570
571         if (refcnt == 0)
572                 free_block = tcf_chain_detach(chain);
573         mutex_unlock(&block->lock);
574
575         if (refcnt == 0) {
576                 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
577                 tcf_chain_destroy(chain, free_block);
578         }
579 }
580
581 static void tcf_chain_put(struct tcf_chain *chain)
582 {
583         __tcf_chain_put(chain, false, false);
584 }
585
586 void tcf_chain_put_by_act(struct tcf_chain *chain)
587 {
588         __tcf_chain_put(chain, true, false);
589 }
590 EXPORT_SYMBOL(tcf_chain_put_by_act);
591
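/* Hedged sketch (not part of this excerpt): how the action side pairs the
 * *_by_act helpers when an action holds a "goto chain" target, modelled on
 * net/sched/act_api.c usage. Names are illustrative.
 */
static struct tcf_chain *example_action_bind_chain(struct tcf_block *block,
                                                   u32 chain_index)
{
        /* Takes a chain reference plus an action reference; creates the
         * chain as an invisible placeholder if it does not exist yet.
         */
        return tcf_chain_get_by_act(block, chain_index);
}

static void example_action_unbind_chain(struct tcf_chain *chain)
{
        /* Drops both references taken above. */
        tcf_chain_put_by_act(chain);
}
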
592 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
593 {
594         __tcf_chain_put(chain, false, true);
595 }
596
597 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
598 {
599         struct tcf_proto *tp, *tp_next;
600
601         mutex_lock(&chain->filter_chain_lock);
602         tp = tcf_chain_dereference(chain->filter_chain, chain);
603         while (tp) {
604                 tp_next = rcu_dereference_protected(tp->next, 1);
605                 tcf_proto_signal_destroying(chain, tp);
606                 tp = tp_next;
607         }
608         tp = tcf_chain_dereference(chain->filter_chain, chain);
609         RCU_INIT_POINTER(chain->filter_chain, NULL);
610         tcf_chain0_head_change(chain, NULL);
611         chain->flushing = true;
612         mutex_unlock(&chain->filter_chain_lock);
613
614         while (tp) {
615                 tp_next = rcu_dereference_protected(tp->next, 1);
616                 tcf_proto_put(tp, rtnl_held, NULL);
617                 tp = tp_next;
618         }
619 }
620
621 static int tcf_block_setup(struct tcf_block *block,
622                            struct flow_block_offload *bo);
623
624 static void tcf_block_offload_init(struct flow_block_offload *bo,
625                                    struct net_device *dev,
626                                    enum flow_block_command command,
627                                    enum flow_block_binder_type binder_type,
628                                    struct flow_block *flow_block,
629                                    bool shared, struct netlink_ext_ack *extack)
630 {
631         bo->net = dev_net(dev);
632         bo->command = command;
633         bo->binder_type = binder_type;
634         bo->block = flow_block;
635         bo->block_shared = shared;
636         bo->extack = extack;
637         INIT_LIST_HEAD(&bo->cb_list);
638 }
639
640 static void tcf_block_unbind(struct tcf_block *block,
641                              struct flow_block_offload *bo);
642
643 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
644 {
645         struct tcf_block *block = block_cb->indr.data;
646         struct net_device *dev = block_cb->indr.dev;
647         struct netlink_ext_ack extack = {};
648         struct flow_block_offload bo;
649
650         tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
651                                block_cb->indr.binder_type,
652                                &block->flow_block, tcf_block_shared(block),
653                                &extack);
654         down_write(&block->cb_lock);
655         list_del(&block_cb->driver_list);
656         list_move(&block_cb->list, &bo.cb_list);
657         up_write(&block->cb_lock);
658         rtnl_lock();
659         tcf_block_unbind(block, &bo);
660         rtnl_unlock();
661 }
662
663 static bool tcf_block_offload_in_use(struct tcf_block *block)
664 {
665         return atomic_read(&block->offloadcnt);
666 }
667
668 static int tcf_block_offload_cmd(struct tcf_block *block,
669                                  struct net_device *dev,
670                                  struct tcf_block_ext_info *ei,
671                                  enum flow_block_command command,
672                                  struct netlink_ext_ack *extack)
673 {
674         struct flow_block_offload bo = {};
675         int err;
676
677         tcf_block_offload_init(&bo, dev, command, ei->binder_type,
678                                &block->flow_block, tcf_block_shared(block),
679                                extack);
680
681         if (dev->netdev_ops->ndo_setup_tc)
682                 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
683         else
684                 err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
685                                                   &bo, tc_block_indr_cleanup);
686
687         if (err < 0) {
688                 if (err != -EOPNOTSUPP)
689                         NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
690                 return err;
691         }
692
693         return tcf_block_setup(block, &bo);
694 }
695
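/* Hedged sketch (not part of this excerpt): the simplest driver-side answer
 * to the TC_SETUP_BLOCK command issued above, using the
 * flow_block_cb_setup_simple() helper from net/core/flow_offload.c.
 * "example_*" names are hypothetical.
 */
static LIST_HEAD(example_block_cb_list);

static int example_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                     void *cb_priv)
{
        /* Translate TC_SETUP_CLSFLOWER etc. into hardware programming. */
        return -EOPNOTSUPP;
}

static int example_ndo_setup_tc(struct net_device *dev,
                                enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data,
                                                  &example_block_cb_list,
                                                  example_setup_tc_block_cb,
                                                  dev, dev, true);
        default:
                return -EOPNOTSUPP;
        }
}
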
696 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
697                                   struct tcf_block_ext_info *ei,
698                                   struct netlink_ext_ack *extack)
699 {
700         struct net_device *dev = q->dev_queue->dev;
701         int err;
702
703         down_write(&block->cb_lock);
704
705         /* If the tc offload feature is disabled and the block we try to
706          * bind to already has some offloaded filters, refuse to bind.
707          */
708         if (dev->netdev_ops->ndo_setup_tc &&
709             !tc_can_offload(dev) &&
710             tcf_block_offload_in_use(block)) {
711                 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
712                 err = -EOPNOTSUPP;
713                 goto err_unlock;
714         }
715
716         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
717         if (err == -EOPNOTSUPP)
718                 goto no_offload_dev_inc;
719         if (err)
720                 goto err_unlock;
721
722         up_write(&block->cb_lock);
723         return 0;
724
725 no_offload_dev_inc:
726         if (tcf_block_offload_in_use(block))
727                 goto err_unlock;
728
729         err = 0;
730         block->nooffloaddevcnt++;
731 err_unlock:
732         up_write(&block->cb_lock);
733         return err;
734 }
735
736 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
737                                      struct tcf_block_ext_info *ei)
738 {
739         struct net_device *dev = q->dev_queue->dev;
740         int err;
741
742         down_write(&block->cb_lock);
743         err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
744         if (err == -EOPNOTSUPP)
745                 goto no_offload_dev_dec;
746         up_write(&block->cb_lock);
747         return;
748
749 no_offload_dev_dec:
750         WARN_ON(block->nooffloaddevcnt-- == 0);
751         up_write(&block->cb_lock);
752 }
753
754 static int
755 tcf_chain0_head_change_cb_add(struct tcf_block *block,
756                               struct tcf_block_ext_info *ei,
757                               struct netlink_ext_ack *extack)
758 {
759         struct tcf_filter_chain_list_item *item;
760         struct tcf_chain *chain0;
761
762         item = kmalloc(sizeof(*item), GFP_KERNEL);
763         if (!item) {
764                 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
765                 return -ENOMEM;
766         }
767         item->chain_head_change = ei->chain_head_change;
768         item->chain_head_change_priv = ei->chain_head_change_priv;
769
770         mutex_lock(&block->lock);
771         chain0 = block->chain0.chain;
772         if (chain0)
773                 tcf_chain_hold(chain0);
774         else
775                 list_add(&item->list, &block->chain0.filter_chain_list);
776         mutex_unlock(&block->lock);
777
778         if (chain0) {
779                 struct tcf_proto *tp_head;
780
781                 mutex_lock(&chain0->filter_chain_lock);
782
783                 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
784                 if (tp_head)
785                         tcf_chain_head_change_item(item, tp_head);
786
787                 mutex_lock(&block->lock);
788                 list_add(&item->list, &block->chain0.filter_chain_list);
789                 mutex_unlock(&block->lock);
790
791                 mutex_unlock(&chain0->filter_chain_lock);
792                 tcf_chain_put(chain0);
793         }
794
795         return 0;
796 }
797
798 static void
799 tcf_chain0_head_change_cb_del(struct tcf_block *block,
800                               struct tcf_block_ext_info *ei)
801 {
802         struct tcf_filter_chain_list_item *item;
803
804         mutex_lock(&block->lock);
805         list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
806                 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
807                     (item->chain_head_change == ei->chain_head_change &&
808                      item->chain_head_change_priv == ei->chain_head_change_priv)) {
809                         if (block->chain0.chain)
810                                 tcf_chain_head_change_item(item, NULL);
811                         list_del(&item->list);
812                         mutex_unlock(&block->lock);
813
814                         kfree(item);
815                         return;
816                 }
817         }
818         mutex_unlock(&block->lock);
819         WARN_ON(1);
820 }
821
822 struct tcf_net {
823         spinlock_t idr_lock; /* Protects idr */
824         struct idr idr;
825 };
826
827 static unsigned int tcf_net_id;
828
829 static int tcf_block_insert(struct tcf_block *block, struct net *net,
830                             struct netlink_ext_ack *extack)
831 {
832         struct tcf_net *tn = net_generic(net, tcf_net_id);
833         int err;
834
835         idr_preload(GFP_KERNEL);
836         spin_lock(&tn->idr_lock);
837         err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
838                             GFP_NOWAIT);
839         spin_unlock(&tn->idr_lock);
840         idr_preload_end();
841
842         return err;
843 }
844
845 static void tcf_block_remove(struct tcf_block *block, struct net *net)
846 {
847         struct tcf_net *tn = net_generic(net, tcf_net_id);
848
849         spin_lock(&tn->idr_lock);
850         idr_remove(&tn->idr, block->index);
851         spin_unlock(&tn->idr_lock);
852 }
853
854 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
855                                           u32 block_index,
856                                           struct netlink_ext_ack *extack)
857 {
858         struct tcf_block *block;
859
860         block = kzalloc(sizeof(*block), GFP_KERNEL);
861         if (!block) {
862                 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
863                 return ERR_PTR(-ENOMEM);
864         }
865         mutex_init(&block->lock);
866         mutex_init(&block->proto_destroy_lock);
867         init_rwsem(&block->cb_lock);
868         flow_block_init(&block->flow_block);
869         INIT_LIST_HEAD(&block->chain_list);
870         INIT_LIST_HEAD(&block->owner_list);
871         INIT_LIST_HEAD(&block->chain0.filter_chain_list);
872
873         refcount_set(&block->refcnt, 1);
874         block->net = net;
875         block->index = block_index;
876
877         /* Don't store q pointer for blocks which are shared */
878         if (!tcf_block_shared(block))
879                 block->q = q;
880         return block;
881 }
882
883 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
884 {
885         struct tcf_net *tn = net_generic(net, tcf_net_id);
886
887         return idr_find(&tn->idr, block_index);
888 }
889
890 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
891 {
892         struct tcf_block *block;
893
894         rcu_read_lock();
895         block = tcf_block_lookup(net, block_index);
896         if (block && !refcount_inc_not_zero(&block->refcnt))
897                 block = NULL;
898         rcu_read_unlock();
899
900         return block;
901 }
902
903 static struct tcf_chain *
904 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
905 {
906         mutex_lock(&block->lock);
907         if (chain)
908                 chain = list_is_last(&chain->list, &block->chain_list) ?
909                         NULL : list_next_entry(chain, list);
910         else
911                 chain = list_first_entry_or_null(&block->chain_list,
912                                                  struct tcf_chain, list);
913
914         /* skip all action-only chains */
915         while (chain && tcf_chain_held_by_acts_only(chain))
916                 chain = list_is_last(&chain->list, &block->chain_list) ?
917                         NULL : list_next_entry(chain, list);
918
919         if (chain)
920                 tcf_chain_hold(chain);
921         mutex_unlock(&block->lock);
922
923         return chain;
924 }
925
926 /* Function to be used by all clients that want to iterate over all chains on
927  * a block. It properly obtains block->lock and takes a reference to the chain
928  * before returning it. Users of this function must be tolerant to concurrent
929  * chain insertion/deletion or ensure that no concurrent chain modification is
930  * possible. Note that netlink dump callbacks cannot guarantee a consistent
931  * dump because the rtnl lock is released each time the skb is filled with
932  * data and sent to user space.
933  */
934
935 struct tcf_chain *
936 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
937 {
938         struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
939
940         if (chain)
941                 tcf_chain_put(chain);
942
943         return chain_next;
944 }
945 EXPORT_SYMBOL(tcf_get_next_chain);
946
947 static struct tcf_proto *
948 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
949 {
950         u32 prio = 0;
951
952         ASSERT_RTNL();
953         mutex_lock(&chain->filter_chain_lock);
954
955         if (!tp) {
956                 tp = tcf_chain_dereference(chain->filter_chain, chain);
957         } else if (tcf_proto_is_deleting(tp)) {
958                 /* 'deleting' flag is set and chain->filter_chain_lock was
959                  * unlocked, which means next pointer could be invalid. Restart
960                  * search.
961                  */
962                 prio = tp->prio + 1;
963                 tp = tcf_chain_dereference(chain->filter_chain, chain);
964
965                 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
966                         if (!tp->deleting && tp->prio >= prio)
967                                 break;
968         } else {
969                 tp = tcf_chain_dereference(tp->next, chain);
970         }
971
972         if (tp)
973                 tcf_proto_get(tp);
974
975         mutex_unlock(&chain->filter_chain_lock);
976
977         return tp;
978 }
979
980 /* Function to be used by all clients that want to iterate over all tps on
981  * a chain. Users of this function must be tolerant to concurrent tp
982  * insertion/deletion or ensure that no concurrent chain modification is
983  * possible. Note that netlink dump callbacks cannot guarantee a consistent
984  * dump because the rtnl lock is released each time the skb is filled with
985  * data and sent to user space.
986  */
987
988 struct tcf_proto *
989 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
990                    bool rtnl_held)
991 {
992         struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
993
994         if (tp)
995                 tcf_proto_put(tp, rtnl_held, NULL);
996
997         return tp_next;
998 }
999 EXPORT_SYMBOL(tcf_get_next_proto);
1000
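/* Hedged sketch (not part of this excerpt): the combined iteration pattern
 * the two helpers above enable, as used by the filter dump code. References
 * are taken and released by the helpers themselves, so the body may sleep
 * between steps. example_walk_block() is a hypothetical name.
 */
static void example_walk_block(struct tcf_block *block)
{
        struct tcf_chain *chain;
        struct tcf_proto *tp;

        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                for (tp = tcf_get_next_proto(chain, NULL, true);
                     tp;
                     tp = tcf_get_next_proto(chain, tp, true)) {
                        /* ... visit tp ... */
                }
        }
}
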
1001 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1002 {
1003         struct tcf_chain *chain;
1004
1005         /* Last reference to block. At this point chains cannot be added or
1006          * removed concurrently.
1007          */
1008         for (chain = tcf_get_next_chain(block, NULL);
1009              chain;
1010              chain = tcf_get_next_chain(block, chain)) {
1011                 tcf_chain_put_explicitly_created(chain);
1012                 tcf_chain_flush(chain, rtnl_held);
1013         }
1014 }
1015
1016 /* Look up the Qdisc and increment its reference counter.
1017  * Set parent, if necessary.
1018  */
1019
1020 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1021                             u32 *parent, int ifindex, bool rtnl_held,
1022                             struct netlink_ext_ack *extack)
1023 {
1024         const struct Qdisc_class_ops *cops;
1025         struct net_device *dev;
1026         int err = 0;
1027
1028         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1029                 return 0;
1030
1031         rcu_read_lock();
1032
1033         /* Find link */
1034         dev = dev_get_by_index_rcu(net, ifindex);
1035         if (!dev) {
1036                 rcu_read_unlock();
1037                 return -ENODEV;
1038         }
1039
1040         /* Find qdisc */
1041         if (!*parent) {
1042                 *q = dev->qdisc;
1043                 *parent = (*q)->handle;
1044         } else {
1045                 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1046                 if (!*q) {
1047                         NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1048                         err = -EINVAL;
1049                         goto errout_rcu;
1050                 }
1051         }
1052
1053         *q = qdisc_refcount_inc_nz(*q);
1054         if (!*q) {
1055                 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1056                 err = -EINVAL;
1057                 goto errout_rcu;
1058         }
1059
1060         /* Is it classful? */
1061         cops = (*q)->ops->cl_ops;
1062         if (!cops) {
1063                 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1064                 err = -EINVAL;
1065                 goto errout_qdisc;
1066         }
1067
1068         if (!cops->tcf_block) {
1069                 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1070                 err = -EOPNOTSUPP;
1071                 goto errout_qdisc;
1072         }
1073
1074 errout_rcu:
1075         /* At this point we know that the qdisc is not noop_qdisc,
1076          * which means that the qdisc holds a reference to the net_device
1077          * and we hold a reference to the qdisc, so it is safe to release
1078          * the rcu read lock.
1079          */
1080         rcu_read_unlock();
1081         return err;
1082
1083 errout_qdisc:
1084         rcu_read_unlock();
1085
1086         if (rtnl_held)
1087                 qdisc_put(*q);
1088         else
1089                 qdisc_put_unlocked(*q);
1090         *q = NULL;
1091
1092         return err;
1093 }
1094
1095 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1096                                int ifindex, struct netlink_ext_ack *extack)
1097 {
1098         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1099                 return 0;
1100
1101         /* Are we searching for a filter attached to a class? */
1102         if (TC_H_MIN(parent)) {
1103                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1104
1105                 *cl = cops->find(q, parent);
1106                 if (*cl == 0) {
1107                         NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1108                         return -ENOENT;
1109                 }
1110         }
1111
1112         return 0;
1113 }
1114
1115 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1116                                           unsigned long cl, int ifindex,
1117                                           u32 block_index,
1118                                           struct netlink_ext_ack *extack)
1119 {
1120         struct tcf_block *block;
1121
1122         if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1123                 block = tcf_block_refcnt_get(net, block_index);
1124                 if (!block) {
1125                         NL_SET_ERR_MSG(extack, "Block of given index was not found");
1126                         return ERR_PTR(-EINVAL);
1127                 }
1128         } else {
1129                 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1130
1131                 block = cops->tcf_block(q, cl, extack);
1132                 if (!block)
1133                         return ERR_PTR(-EINVAL);
1134
1135                 if (tcf_block_shared(block)) {
1136                         NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1137                         return ERR_PTR(-EOPNOTSUPP);
1138                 }
1139
1140                 /* Always take a reference to the block in order to support
1141                  * execution of the cls API rules update path without the
1142                  * rtnl lock. The caller must release the block when finished
1143                  * using it. The 'if' branch of this conditional obtains its
1144                  * reference by calling tcf_block_refcnt_get().
1145                  */
1146                 refcount_inc(&block->refcnt);
1147         }
1148
1149         return block;
1150 }
1151
1152 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1153                             struct tcf_block_ext_info *ei, bool rtnl_held)
1154 {
1155         if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1156                 /* Flushing/putting all chains will cause the block to be
1157                  * deallocated when the last chain is freed. However, if
1158                  * chain_list is empty, the block has to be deallocated
1159                  * manually. Once the block's refcount has reached 0, it is
1160                  * no longer possible to increment it or add new chains.
1161                  */
1162                 bool free_block = list_empty(&block->chain_list);
1163
1164                 mutex_unlock(&block->lock);
1165                 if (tcf_block_shared(block))
1166                         tcf_block_remove(block, block->net);
1167
1168                 if (q)
1169                         tcf_block_offload_unbind(block, q, ei);
1170
1171                 if (free_block)
1172                         tcf_block_destroy(block);
1173                 else
1174                         tcf_block_flush_all_chains(block, rtnl_held);
1175         } else if (q) {
1176                 tcf_block_offload_unbind(block, q, ei);
1177         }
1178 }
1179
1180 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1181 {
1182         __tcf_block_put(block, NULL, NULL, rtnl_held);
1183 }
1184
1185 /* Find tcf block.
1186  * Set q, parent, cl when appropriate.
1187  */
1188
1189 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1190                                         u32 *parent, unsigned long *cl,
1191                                         int ifindex, u32 block_index,
1192                                         struct netlink_ext_ack *extack)
1193 {
1194         struct tcf_block *block;
1195         int err = 0;
1196
1197         ASSERT_RTNL();
1198
1199         err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1200         if (err)
1201                 goto errout;
1202
1203         err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1204         if (err)
1205                 goto errout_qdisc;
1206
1207         block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1208         if (IS_ERR(block)) {
1209                 err = PTR_ERR(block);
1210                 goto errout_qdisc;
1211         }
1212
1213         return block;
1214
1215 errout_qdisc:
1216         if (*q)
1217                 qdisc_put(*q);
1218 errout:
1219         *q = NULL;
1220         return ERR_PTR(err);
1221 }
1222
1223 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1224                               bool rtnl_held)
1225 {
1226         if (!IS_ERR_OR_NULL(block))
1227                 tcf_block_refcnt_put(block, rtnl_held);
1228
1229         if (q) {
1230                 if (rtnl_held)
1231                         qdisc_put(q);
1232                 else
1233                         qdisc_put_unlocked(q);
1234         }
1235 }
1236
1237 struct tcf_block_owner_item {
1238         struct list_head list;
1239         struct Qdisc *q;
1240         enum flow_block_binder_type binder_type;
1241 };
1242
1243 static void
1244 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1245                                struct Qdisc *q,
1246                                enum flow_block_binder_type binder_type)
1247 {
1248         if (block->keep_dst &&
1249             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1250             binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1251                 netif_keep_dst(qdisc_dev(q));
1252 }
1253
1254 void tcf_block_netif_keep_dst(struct tcf_block *block)
1255 {
1256         struct tcf_block_owner_item *item;
1257
1258         block->keep_dst = true;
1259         list_for_each_entry(item, &block->owner_list, list)
1260                 tcf_block_owner_netif_keep_dst(block, item->q,
1261                                                item->binder_type);
1262 }
1263 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1264
1265 static int tcf_block_owner_add(struct tcf_block *block,
1266                                struct Qdisc *q,
1267                                enum flow_block_binder_type binder_type)
1268 {
1269         struct tcf_block_owner_item *item;
1270
1271         item = kmalloc(sizeof(*item), GFP_KERNEL);
1272         if (!item)
1273                 return -ENOMEM;
1274         item->q = q;
1275         item->binder_type = binder_type;
1276         list_add(&item->list, &block->owner_list);
1277         return 0;
1278 }
1279
1280 static void tcf_block_owner_del(struct tcf_block *block,
1281                                 struct Qdisc *q,
1282                                 enum flow_block_binder_type binder_type)
1283 {
1284         struct tcf_block_owner_item *item;
1285
1286         list_for_each_entry(item, &block->owner_list, list) {
1287                 if (item->q == q && item->binder_type == binder_type) {
1288                         list_del(&item->list);
1289                         kfree(item);
1290                         return;
1291                 }
1292         }
1293         WARN_ON(1);
1294 }
1295
1296 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1297                       struct tcf_block_ext_info *ei,
1298                       struct netlink_ext_ack *extack)
1299 {
1300         struct net *net = qdisc_net(q);
1301         struct tcf_block *block = NULL;
1302         int err;
1303
1304         if (ei->block_index)
1305                 /* A nonzero block_index means a shared block is requested */
1306                 block = tcf_block_refcnt_get(net, ei->block_index);
1307
1308         if (!block) {
1309                 block = tcf_block_create(net, q, ei->block_index, extack);
1310                 if (IS_ERR(block))
1311                         return PTR_ERR(block);
1312                 if (tcf_block_shared(block)) {
1313                         err = tcf_block_insert(block, net, extack);
1314                         if (err)
1315                                 goto err_block_insert;
1316                 }
1317         }
1318
1319         err = tcf_block_owner_add(block, q, ei->binder_type);
1320         if (err)
1321                 goto err_block_owner_add;
1322
1323         tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1324
1325         err = tcf_chain0_head_change_cb_add(block, ei, extack);
1326         if (err)
1327                 goto err_chain0_head_change_cb_add;
1328
1329         err = tcf_block_offload_bind(block, q, ei, extack);
1330         if (err)
1331                 goto err_block_offload_bind;
1332
1333         *p_block = block;
1334         return 0;
1335
1336 err_block_offload_bind:
1337         tcf_chain0_head_change_cb_del(block, ei);
1338 err_chain0_head_change_cb_add:
1339         tcf_block_owner_del(block, q, ei->binder_type);
1340 err_block_owner_add:
1341 err_block_insert:
1342         tcf_block_refcnt_put(block, true);
1343         return err;
1344 }
1345 EXPORT_SYMBOL(tcf_block_get_ext);
1346
1347 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1348 {
1349         struct tcf_proto __rcu **p_filter_chain = priv;
1350
1351         rcu_assign_pointer(*p_filter_chain, tp_head);
1352 }
1353
1354 int tcf_block_get(struct tcf_block **p_block,
1355                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1356                   struct netlink_ext_ack *extack)
1357 {
1358         struct tcf_block_ext_info ei = {
1359                 .chain_head_change = tcf_chain_head_change_dflt,
1360                 .chain_head_change_priv = p_filter_chain,
1361         };
1362
1363         WARN_ON(!p_filter_chain);
1364         return tcf_block_get_ext(p_block, q, &ei, extack);
1365 }
1366 EXPORT_SYMBOL(tcf_block_get);
1367
1368 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1369  * actions should all be removed after flushing.
1370  */
1371 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1372                        struct tcf_block_ext_info *ei)
1373 {
1374         if (!block)
1375                 return;
1376         tcf_chain0_head_change_cb_del(block, ei);
1377         tcf_block_owner_del(block, q, ei->binder_type);
1378
1379         __tcf_block_put(block, q, ei, true);
1380 }
1381 EXPORT_SYMBOL(tcf_block_put_ext);
1382
1383 void tcf_block_put(struct tcf_block *block)
1384 {
1385         struct tcf_block_ext_info ei = {0, };
1386
1387         if (!block)
1388                 return;
1389         tcf_block_put_ext(block, block->q, &ei);
1390 }
1391
1392 EXPORT_SYMBOL(tcf_block_put);
1393
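/* Hedged sketch (not part of this excerpt): how a qdisc typically obtains
 * its block in ->init() and releases it in ->destroy(), modelled on in-tree
 * qdiscs. "example_*" names are hypothetical.
 */
struct example_sched_data {
        struct tcf_proto __rcu *filter_list;
        struct tcf_block *block;
};

static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
                              struct netlink_ext_ack *extack)
{
        struct example_sched_data *q = qdisc_priv(sch);

        return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void example_qdisc_destroy(struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
}
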
1394 static int
1395 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1396                             void *cb_priv, bool add, bool offload_in_use,
1397                             struct netlink_ext_ack *extack)
1398 {
1399         struct tcf_chain *chain, *chain_prev;
1400         struct tcf_proto *tp, *tp_prev;
1401         int err;
1402
1403         lockdep_assert_held(&block->cb_lock);
1404
1405         for (chain = __tcf_get_next_chain(block, NULL);
1406              chain;
1407              chain_prev = chain,
1408                      chain = __tcf_get_next_chain(block, chain),
1409                      tcf_chain_put(chain_prev)) {
1410                 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1411                      tp_prev = tp,
1412                              tp = __tcf_get_next_proto(chain, tp),
1413                              tcf_proto_put(tp_prev, true, NULL)) {
1414                         if (tp->ops->reoffload) {
1415                                 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1416                                                          extack);
1417                                 if (err && add)
1418                                         goto err_playback_remove;
1419                         } else if (add && offload_in_use) {
1420                                 err = -EOPNOTSUPP;
1421                                 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1422                                 goto err_playback_remove;
1423                         }
1424                 }
1425         }
1426
1427         return 0;
1428
1429 err_playback_remove:
1430         tcf_proto_put(tp, true, NULL);
1431         tcf_chain_put(chain);
1432         tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1433                                     extack);
1434         return err;
1435 }
1436
1437 static int tcf_block_bind(struct tcf_block *block,
1438                           struct flow_block_offload *bo)
1439 {
1440         struct flow_block_cb *block_cb, *next;
1441         int err, i = 0;
1442
1443         lockdep_assert_held(&block->cb_lock);
1444
1445         list_for_each_entry(block_cb, &bo->cb_list, list) {
1446                 err = tcf_block_playback_offloads(block, block_cb->cb,
1447                                                   block_cb->cb_priv, true,
1448                                                   tcf_block_offload_in_use(block),
1449                                                   bo->extack);
1450                 if (err)
1451                         goto err_unroll;
1452                 if (!bo->unlocked_driver_cb)
1453                         block->lockeddevcnt++;
1454
1455                 i++;
1456         }
1457         list_splice(&bo->cb_list, &block->flow_block.cb_list);
1458
1459         return 0;
1460
1461 err_unroll:
1462         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1463                 if (i-- > 0) {
1464                         list_del(&block_cb->list);
1465                         tcf_block_playback_offloads(block, block_cb->cb,
1466                                                     block_cb->cb_priv, false,
1467                                                     tcf_block_offload_in_use(block),
1468                                                     NULL);
1469                         if (!bo->unlocked_driver_cb)
1470                                 block->lockeddevcnt--;
1471                 }
1472                 flow_block_cb_free(block_cb);
1473         }
1474
1475         return err;
1476 }
1477
1478 static void tcf_block_unbind(struct tcf_block *block,
1479                              struct flow_block_offload *bo)
1480 {
1481         struct flow_block_cb *block_cb, *next;
1482
1483         lockdep_assert_held(&block->cb_lock);
1484
1485         list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1486                 tcf_block_playback_offloads(block, block_cb->cb,
1487                                             block_cb->cb_priv, false,
1488                                             tcf_block_offload_in_use(block),
1489                                             NULL);
1490                 list_del(&block_cb->list);
1491                 flow_block_cb_free(block_cb);
1492                 if (!bo->unlocked_driver_cb)
1493                         block->lockeddevcnt--;
1494         }
1495 }
1496
1497 static int tcf_block_setup(struct tcf_block *block,
1498                            struct flow_block_offload *bo)
1499 {
1500         int err;
1501
1502         switch (bo->command) {
1503         case FLOW_BLOCK_BIND:
1504                 err = tcf_block_bind(block, bo);
1505                 break;
1506         case FLOW_BLOCK_UNBIND:
1507                 err = 0;
1508                 tcf_block_unbind(block, bo);
1509                 break;
1510         default:
1511                 WARN_ON_ONCE(1);
1512                 err = -EOPNOTSUPP;
1513         }
1514
1515         return err;
1516 }
1517
1518 /* Main classifier routine: scans the classifier chain attached
1519  * to this qdisc, (optionally) tests for the protocol, and asks
1520  * the specific classifiers.
1521  */
1522 static inline int __tcf_classify(struct sk_buff *skb,
1523                                  const struct tcf_proto *tp,
1524                                  const struct tcf_proto *orig_tp,
1525                                  struct tcf_result *res,
1526                                  bool compat_mode,
1527                                  u32 *last_executed_chain)
1528 {
1529 #ifdef CONFIG_NET_CLS_ACT
1530         const int max_reclassify_loop = 4;
1531         const struct tcf_proto *first_tp;
1532         int limit = 0;
1533
1534 reclassify:
1535 #endif
1536         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1537                 __be16 protocol = tc_skb_protocol(skb);
1538                 int err;
1539
1540                 if (tp->protocol != protocol &&
1541                     tp->protocol != htons(ETH_P_ALL))
1542                         continue;
1543
1544                 err = tp->classify(skb, tp, res);
1545 #ifdef CONFIG_NET_CLS_ACT
1546                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1547                         first_tp = orig_tp;
1548                         *last_executed_chain = first_tp->chain->index;
1549                         goto reset;
1550                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1551                         first_tp = res->goto_tp;
1552                         *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1553                         goto reset;
1554                 }
1555 #endif
1556                 if (err >= 0)
1557                         return err;
1558         }
1559
1560         return TC_ACT_UNSPEC; /* signal: continue lookup */
1561 #ifdef CONFIG_NET_CLS_ACT
1562 reset:
1563         if (unlikely(limit++ >= max_reclassify_loop)) {
1564                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1565                                        tp->chain->block->index,
1566                                        tp->prio & 0xffff,
1567                                        ntohs(tp->protocol));
1568                 return TC_ACT_SHOT;
1569         }
1570
1571         tp = first_tp;
1572         goto reclassify;
1573 #endif
1574 }
1575
1576 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1577                  struct tcf_result *res, bool compat_mode)
1578 {
1579         u32 last_executed_chain = 0;
1580
1581         return __tcf_classify(skb, tp, tp, res, compat_mode,
1582                               &last_executed_chain);
1583 }
1584 EXPORT_SYMBOL(tcf_classify);
1585
1586 int tcf_classify_ingress(struct sk_buff *skb,
1587                          const struct tcf_block *ingress_block,
1588                          const struct tcf_proto *tp,
1589                          struct tcf_result *res, bool compat_mode)
1590 {
1591 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1592         u32 last_executed_chain = 0;
1593
1594         return __tcf_classify(skb, tp, tp, res, compat_mode,
1595                               &last_executed_chain);
1596 #else
1597         u32 last_executed_chain = tp ? tp->chain->index : 0;
1598         const struct tcf_proto *orig_tp = tp;
1599         struct tc_skb_ext *ext;
1600         int ret;
1601
1602         ext = skb_ext_find(skb, TC_SKB_EXT);
1603
1604         if (ext && ext->chain) {
1605                 struct tcf_chain *fchain;
1606
1607                 fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
1608                 if (!fchain)
1609                         return TC_ACT_SHOT;
1610
1611                 /* Consume, so cloned/redirect skbs won't inherit ext */
1612                 skb_ext_del(skb, TC_SKB_EXT);
1613
1614                 tp = rcu_dereference_bh(fchain->filter_chain);
1615                 last_executed_chain = fchain->index;
1616         }
1617
1618         ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1619                              &last_executed_chain);
1620
1621         /* If we missed on some chain, remember it so a later round can resume there */
1622         if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1623                 ext = skb_ext_add(skb, TC_SKB_EXT);
1624                 if (WARN_ON_ONCE(!ext))
1625                         return TC_ACT_SHOT;
1626                 ext->chain = last_executed_chain;
1627         }
1628
1629         return ret;
1630 #endif
1631 }
1632 EXPORT_SYMBOL(tcf_classify_ingress);
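
/* Illustrative sketch (not part of the original file): the resume protocol
 * that tcf_classify_ingress() implements above with TC_SKB_EXT. When a
 * round ends in "continue lookup" after having executed some chain, the
 * chain index is stashed in a per-packet extension; the next round finds
 * the extension, consumes it, and restarts from that chain. The structure
 * and helpers below are hypothetical stand-ins for skb extensions.
 */
#if 0 /* illustration only */
#include <stdio.h>

struct demo_pkt {
	unsigned int resume_chain; /* 0 = no extension attached */
};

static void demo_round(struct demo_pkt *p, unsigned int last_chain, int missed)
{
	unsigned int start = 0;

	if (p->resume_chain) {		/* ext found: resume and consume */
		start = p->resume_chain;
		p->resume_chain = 0;
	}
	printf("round starts at chain %u\n", start);
	if (missed)			/* missed on some chain: stash it */
		p->resume_chain = last_chain;
}

int main(void)
{
	struct demo_pkt pkt = { 0 };

	demo_round(&pkt, 7, 1);	/* first pass misses after chain 7 */
	demo_round(&pkt, 0, 0);	/* second pass resumes from chain 7 */
	return 0;
}
#endif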
1633
1634 struct tcf_chain_info {
1635         struct tcf_proto __rcu **pprev;
1636         struct tcf_proto __rcu *next;
1637 };
1638
1639 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1640                                            struct tcf_chain_info *chain_info)
1641 {
1642         return tcf_chain_dereference(*chain_info->pprev, chain);
1643 }
1644
1645 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1646                                struct tcf_chain_info *chain_info,
1647                                struct tcf_proto *tp)
1648 {
1649         if (chain->flushing)
1650                 return -EAGAIN;
1651
1652         if (*chain_info->pprev == chain->filter_chain)
1653                 tcf_chain0_head_change(chain, tp);
1654         tcf_proto_get(tp);
1655         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1656         rcu_assign_pointer(*chain_info->pprev, tp);
1657
1658         return 0;
1659 }
1660
1661 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1662                                 struct tcf_chain_info *chain_info,
1663                                 struct tcf_proto *tp)
1664 {
1665         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1666
1667         tcf_proto_mark_delete(tp);
1668         if (tp == chain->filter_chain)
1669                 tcf_chain0_head_change(chain, next);
1670         RCU_INIT_POINTER(*chain_info->pprev, next);
1671 }
1672
1673 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1674                                            struct tcf_chain_info *chain_info,
1675                                            u32 protocol, u32 prio,
1676                                            bool prio_allocate);
1677
1678 /* Try to insert a new proto.
1679  * If a proto with the specified priority already exists, free the new
1680  * proto and return the existing one.
1681  */
1682
1683 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1684                                                     struct tcf_proto *tp_new,
1685                                                     u32 protocol, u32 prio,
1686                                                     bool rtnl_held)
1687 {
1688         struct tcf_chain_info chain_info;
1689         struct tcf_proto *tp;
1690         int err = 0;
1691
1692         mutex_lock(&chain->filter_chain_lock);
1693
1694         if (tcf_proto_exists_destroying(chain, tp_new)) {
1695                 mutex_unlock(&chain->filter_chain_lock);
1696                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1697                 return ERR_PTR(-EAGAIN);
1698         }
1699
1700         tp = tcf_chain_tp_find(chain, &chain_info,
1701                                protocol, prio, false);
1702         if (!tp)
1703                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1704         mutex_unlock(&chain->filter_chain_lock);
1705
1706         if (tp) {
1707                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1708                 tp_new = tp;
1709         } else if (err) {
1710                 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1711                 tp_new = ERR_PTR(err);
1712         }
1713
1714         return tp_new;
1715 }
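
/* Illustrative sketch (not part of the original file): the insert-unique
 * semantics of tcf_chain_tp_insert_unique() above, reduced to a plain
 * singly linked list. The caller always hands over a freshly created node;
 * on a (prio, protocol) duplicate the new node is freed and the existing
 * one is returned instead. All names are hypothetical.
 */
#if 0 /* illustration only */
#include <stdio.h>
#include <stdlib.h>

struct demo_tp {
	unsigned int prio, protocol;
	struct demo_tp *next;
};

static struct demo_tp *demo_insert_unique(struct demo_tp **head,
					  struct demo_tp *tp_new)
{
	struct demo_tp *tp;

	for (tp = *head; tp; tp = tp->next) {
		if (tp->prio == tp_new->prio &&
		    tp->protocol == tp_new->protocol) {
			free(tp_new);	/* duplicate: drop the new node */
			return tp;	/* ...and hand back the old one */
		}
	}
	tp_new->next = *head;
	*head = tp_new;
	return tp_new;
}

int main(void)
{
	struct demo_tp *head = NULL;
	struct demo_tp *a = calloc(1, sizeof(*a));
	struct demo_tp *b = calloc(1, sizeof(*b));

	a->prio = b->prio = 1;	/* same key: second insert is a duplicate */
	printf("%d\n", demo_insert_unique(&head, a) == a); /* 1 */
	printf("%d\n", demo_insert_unique(&head, b) == a); /* 1 */
	return 0;
}
#endif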
1716
1717 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1718                                       struct tcf_proto *tp, bool rtnl_held,
1719                                       struct netlink_ext_ack *extack)
1720 {
1721         struct tcf_chain_info chain_info;
1722         struct tcf_proto *tp_iter;
1723         struct tcf_proto **pprev;
1724         struct tcf_proto *next;
1725
1726         mutex_lock(&chain->filter_chain_lock);
1727
1728         /* Atomically find and remove tp from chain. */
1729         for (pprev = &chain->filter_chain;
1730              (tp_iter = tcf_chain_dereference(*pprev, chain));
1731              pprev = &tp_iter->next) {
1732                 if (tp_iter == tp) {
1733                         chain_info.pprev = pprev;
1734                         chain_info.next = tp_iter->next;
1735                         WARN_ON(tp_iter->deleting);
1736                         break;
1737                 }
1738         }
1739         /* Verify that tp still exists and no new filters were inserted
1740          * concurrently.
1741          * Mark tp for deletion if it is empty.
1742          */
1743         if (!tp_iter || !tcf_proto_check_delete(tp)) {
1744                 mutex_unlock(&chain->filter_chain_lock);
1745                 return;
1746         }
1747
1748         tcf_proto_signal_destroying(chain, tp);
1749         next = tcf_chain_dereference(chain_info.next, chain);
1750         if (tp == chain->filter_chain)
1751                 tcf_chain0_head_change(chain, next);
1752         RCU_INIT_POINTER(*chain_info.pprev, next);
1753         mutex_unlock(&chain->filter_chain_lock);
1754
1755         tcf_proto_put(tp, rtnl_held, extack);
1756 }
1757
1758 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1759                                            struct tcf_chain_info *chain_info,
1760                                            u32 protocol, u32 prio,
1761                                            bool prio_allocate)
1762 {
1763         struct tcf_proto **pprev;
1764         struct tcf_proto *tp;
1765
1766         /* Check the chain for an existing proto-tcf with this priority */
1767         for (pprev = &chain->filter_chain;
1768              (tp = tcf_chain_dereference(*pprev, chain));
1769              pprev = &tp->next) {
1770                 if (tp->prio >= prio) {
1771                         if (tp->prio == prio) {
1772                                 if (prio_allocate ||
1773                                     (tp->protocol != protocol && protocol))
1774                                         return ERR_PTR(-EINVAL);
1775                         } else {
1776                                 tp = NULL;
1777                         }
1778                         break;
1779                 }
1780         }
1781         chain_info->pprev = pprev;
1782         if (tp) {
1783                 chain_info->next = tp->next;
1784                 tcf_proto_get(tp);
1785         } else {
1786                 chain_info->next = NULL;
1787         }
1788         return tp;
1789 }
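
/* Illustrative sketch (not part of the original file): the lookup rules of
 * tcf_chain_tp_find() above. The chain is kept sorted by ascending prio,
 * so the scan can stop at the first entry with prio >= the requested one;
 * an equal prio with a different, non-zero protocol is -EINVAL, and a
 * higher prio means the proto does not exist. Names are hypothetical.
 */
#if 0 /* illustration only */
#include <errno.h>
#include <stdio.h>

struct demo_tp { unsigned int prio, protocol; };

/* Returns the index of a match, -ENOENT if absent, -EINVAL on clash. */
static int demo_find(const struct demo_tp *v, int n,
		     unsigned int prio, unsigned int protocol)
{
	int i;

	for (i = 0; i < n; i++) {	/* v[] sorted by ascending prio */
		if (v[i].prio < prio)
			continue;
		if (v[i].prio > prio)
			return -ENOENT;
		if (protocol && v[i].protocol != protocol)
			return -EINVAL;
		return i;
	}
	return -ENOENT;
}

int main(void)
{
	const struct demo_tp v[] = { { 1, 0x0800 }, { 2, 0x86DD } };

	printf("%d\n", demo_find(v, 2, 2, 0x86DD)); /* 1: found */
	printf("%d\n", demo_find(v, 2, 2, 0x0800)); /* -EINVAL */
	printf("%d\n", demo_find(v, 2, 3, 0));      /* -ENOENT */
	return 0;
}
#endif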
1790
1791 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1792                          struct tcf_proto *tp, struct tcf_block *block,
1793                          struct Qdisc *q, u32 parent, void *fh,
1794                          u32 portid, u32 seq, u16 flags, int event,
1795                          bool terse_dump, bool rtnl_held)
1796 {
1797         struct tcmsg *tcm;
1798         struct nlmsghdr  *nlh;
1799         unsigned char *b = skb_tail_pointer(skb);
1800
1801         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1802         if (!nlh)
1803                 goto out_nlmsg_trim;
1804         tcm = nlmsg_data(nlh);
1805         tcm->tcm_family = AF_UNSPEC;
1806         tcm->tcm__pad1 = 0;
1807         tcm->tcm__pad2 = 0;
1808         if (q) {
1809                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1810                 tcm->tcm_parent = parent;
1811         } else {
1812                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1813                 tcm->tcm_block_index = block->index;
1814         }
1815         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1816         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1817                 goto nla_put_failure;
1818         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1819                 goto nla_put_failure;
1820         if (!fh) {
1821                 tcm->tcm_handle = 0;
1822         } else if (terse_dump) {
1823                 if (tp->ops->terse_dump) {
1824                         if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1825                                                 rtnl_held) < 0)
1826                                 goto nla_put_failure;
1827                 } else {
1828                         goto cls_op_not_supp;
1829                 }
1830         } else {
1831                 if (tp->ops->dump &&
1832                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1833                         goto nla_put_failure;
1834         }
1835         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1836         return skb->len;
1837
1838 out_nlmsg_trim:
1839 nla_put_failure:
1840 cls_op_not_supp:
1841         nlmsg_trim(skb, b);
1842         return -1;
1843 }
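
/* Illustrative sketch (not part of the original file): how tcf_fill_node()
 * above packs prio and protocol into tcm_info. The TC_H_* helpers are
 * redefined locally so the demo builds as a plain userspace program; they
 * mirror the macros in <linux/pkt_sched.h>.
 */
#if 0 /* illustration only */
#include <stdio.h>

#define DEMO_TC_H_MAJ(h)	 ((h) & 0xFFFF0000U)
#define DEMO_TC_H_MIN(h)	 ((h) & 0x0000FFFFU)
#define DEMO_TC_H_MAKE(maj, min) (DEMO_TC_H_MAJ(maj) | DEMO_TC_H_MIN(min))

int main(void)
{
	unsigned int prio = 0x00010000;	/* priority: major 16 bits */
	unsigned int protocol = 0x0800;	/* ETH_P_IP: minor 16 bits */
	unsigned int tcm_info = DEMO_TC_H_MAKE(prio, protocol);

	printf("tcm_info   = 0x%08x\n", tcm_info);
	printf("prio back  = 0x%08x\n", DEMO_TC_H_MAJ(tcm_info));
	printf("proto back = 0x%04x\n", DEMO_TC_H_MIN(tcm_info));
	return 0;
}
#endif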
1844
1845 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1846                           struct nlmsghdr *n, struct tcf_proto *tp,
1847                           struct tcf_block *block, struct Qdisc *q,
1848                           u32 parent, void *fh, int event, bool unicast,
1849                           bool rtnl_held)
1850 {
1851         struct sk_buff *skb;
1852         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1853         int err = 0;
1854
1855         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1856         if (!skb)
1857                 return -ENOBUFS;
1858
1859         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1860                           n->nlmsg_seq, n->nlmsg_flags, event,
1861                           false, rtnl_held) <= 0) {
1862                 kfree_skb(skb);
1863                 return -EINVAL;
1864         }
1865
1866         if (unicast)
1867                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1868         else
1869                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1870                                      n->nlmsg_flags & NLM_F_ECHO);
1871
1872         if (err > 0)
1873                 err = 0;
1874         return err;
1875 }
1876
1877 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1878                               struct nlmsghdr *n, struct tcf_proto *tp,
1879                               struct tcf_block *block, struct Qdisc *q,
1880                               u32 parent, void *fh, bool unicast, bool *last,
1881                               bool rtnl_held, struct netlink_ext_ack *extack)
1882 {
1883         struct sk_buff *skb;
1884         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1885         int err;
1886
1887         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1888         if (!skb)
1889                 return -ENOBUFS;
1890
1891         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1892                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1893                           false, rtnl_held) <= 0) {
1894                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1895                 kfree_skb(skb);
1896                 return -EINVAL;
1897         }
1898
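	/* Build the notification before ops->delete() runs: fh may be freed
	 * by the delete below and could no longer be dumped afterwards.
	 */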
1899         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1900         if (err) {
1901                 kfree_skb(skb);
1902                 return err;
1903         }
1904
1905         if (unicast)
1906                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1907         else
1908                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1909                                      n->nlmsg_flags & NLM_F_ECHO);
1910         if (err < 0)
1911                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1912
1913         if (err > 0)
1914                 err = 0;
1915         return err;
1916 }
1917
1918 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1919                                  struct tcf_block *block, struct Qdisc *q,
1920                                  u32 parent, struct nlmsghdr *n,
1921                                  struct tcf_chain *chain, int event,
1922                                  bool rtnl_held)
1923 {
1924         struct tcf_proto *tp;
1925
1926         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1927              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1928                 tfilter_notify(net, oskb, n, tp, block,
1929                                q, parent, NULL, event, false, rtnl_held);
1930 }
1931
1932 static void tfilter_put(struct tcf_proto *tp, void *fh)
1933 {
1934         if (tp->ops->put && fh)
1935                 tp->ops->put(tp, fh);
1936 }
1937
1938 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1939                           struct netlink_ext_ack *extack)
1940 {
1941         struct net *net = sock_net(skb->sk);
1942         struct nlattr *tca[TCA_MAX + 1];
1943         char name[IFNAMSIZ];
1944         struct tcmsg *t;
1945         u32 protocol;
1946         u32 prio;
1947         bool prio_allocate;
1948         u32 parent;
1949         u32 chain_index;
1950         struct Qdisc *q = NULL;
1951         struct tcf_chain_info chain_info;
1952         struct tcf_chain *chain = NULL;
1953         struct tcf_block *block;
1954         struct tcf_proto *tp;
1955         unsigned long cl;
1956         void *fh;
1957         int err;
1958         int tp_created;
1959         bool rtnl_held = false;
1960
1961         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1962                 return -EPERM;
1963
1964 replay:
1965         tp_created = 0;
1966
1967         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1968                                      rtm_tca_policy, extack);
1969         if (err < 0)
1970                 return err;
1971
1972         t = nlmsg_data(n);
1973         protocol = TC_H_MIN(t->tcm_info);
1974         prio = TC_H_MAJ(t->tcm_info);
1975         prio_allocate = false;
1976         parent = t->tcm_parent;
1977         tp = NULL;
1978         cl = 0;
1979         block = NULL;
1980
1981         if (prio == 0) {
1982                 /* If no priority is provided by the user,
1983                  * we allocate one.
1984                  */
1985                 if (n->nlmsg_flags & NLM_F_CREATE) {
1986                         prio = TC_H_MAKE(0x80000000U, 0U);
1987                         prio_allocate = true;
1988                 } else {
1989                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1990                         return -ENOENT;
1991                 }
1992         }
1993
1994         /* Find head of filter chain. */
1995
1996         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1997         if (err)
1998                 return err;
1999
2000         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2001                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2002                 err = -EINVAL;
2003                 goto errout;
2004         }
2005
2006         /* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2007          * the block is shared (no qdisc found), the qdisc is not unlocked,
2008          * the classifier type is not specified, or the classifier is not unlocked.
2009          */
2010         if (rtnl_held ||
2011             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2012             !tcf_proto_is_unlocked(name)) {
2013                 rtnl_held = true;
2014                 rtnl_lock();
2015         }
2016
2017         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2018         if (err)
2019                 goto errout;
2020
2021         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2022                                  extack);
2023         if (IS_ERR(block)) {
2024                 err = PTR_ERR(block);
2025                 goto errout;
2026         }
2027         block->classid = parent;
2028
2029         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2030         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2031                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2032                 err = -EINVAL;
2033                 goto errout;
2034         }
2035         chain = tcf_chain_get(block, chain_index, true);
2036         if (!chain) {
2037                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2038                 err = -ENOMEM;
2039                 goto errout;
2040         }
2041
2042         mutex_lock(&chain->filter_chain_lock);
2043         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2044                                prio, prio_allocate);
2045         if (IS_ERR(tp)) {
2046                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2047                 err = PTR_ERR(tp);
2048                 goto errout_locked;
2049         }
2050
2051         if (tp == NULL) {
2052                 struct tcf_proto *tp_new = NULL;
2053
2054                 if (chain->flushing) {
2055                         err = -EAGAIN;
2056                         goto errout_locked;
2057                 }
2058
2059                 /* Proto-tcf does not exist, create a new one */
2060
2061                 if (tca[TCA_KIND] == NULL || !protocol) {
2062                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2063                         err = -EINVAL;
2064                         goto errout_locked;
2065                 }
2066
2067                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2068                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2069                         err = -ENOENT;
2070                         goto errout_locked;
2071                 }
2072
2073                 if (prio_allocate)
2074                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2075                                                                &chain_info));
2076
2077                 mutex_unlock(&chain->filter_chain_lock);
2078                 tp_new = tcf_proto_create(name, protocol, prio, chain,
2079                                           rtnl_held, extack);
2080                 if (IS_ERR(tp_new)) {
2081                         err = PTR_ERR(tp_new);
2082                         goto errout_tp;
2083                 }
2084
2085                 tp_created = 1;
2086                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2087                                                 rtnl_held);
2088                 if (IS_ERR(tp)) {
2089                         err = PTR_ERR(tp);
2090                         goto errout_tp;
2091                 }
2092         } else {
2093                 mutex_unlock(&chain->filter_chain_lock);
2094         }
2095
2096         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2097                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2098                 err = -EINVAL;
2099                 goto errout;
2100         }
2101
2102         fh = tp->ops->get(tp, t->tcm_handle);
2103
2104         if (!fh) {
2105                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2106                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2107                         err = -ENOENT;
2108                         goto errout;
2109                 }
2110         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2111                 tfilter_put(tp, fh);
2112                 NL_SET_ERR_MSG(extack, "Filter already exists");
2113                 err = -EEXIST;
2114                 goto errout;
2115         }
2116
2117         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2118                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2119                 err = -EINVAL;
2120                 goto errout;
2121         }
2122
2123         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2124                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2125                               rtnl_held, extack);
2126         if (err == 0) {
2127                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2128                                RTM_NEWTFILTER, false, rtnl_held);
2129                 tfilter_put(tp, fh);
2130                 /* q pointer is NULL for shared blocks */
2131                 if (q)
2132                         q->flags &= ~TCQ_F_CAN_BYPASS;
2133         }
2134
2135 errout:
2136         if (err && tp_created)
2137                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2138 errout_tp:
2139         if (chain) {
2140                 if (tp && !IS_ERR(tp))
2141                         tcf_proto_put(tp, rtnl_held, NULL);
2142                 if (!tp_created)
2143                         tcf_chain_put(chain);
2144         }
2145         tcf_block_release(q, block, rtnl_held);
2146
2147         if (rtnl_held)
2148                 rtnl_unlock();
2149
2150         if (err == -EAGAIN) {
2151                 /* Take the rtnl lock in case EAGAIN is caused by a concurrent
2152                  * flush of the target chain.
2153                  */
2154                 rtnl_held = true;
2155                 /* Replay the request. */
2156                 goto replay;
2157         }
2158         return err;
2159
2160 errout_locked:
2161         mutex_unlock(&chain->filter_chain_lock);
2162         goto errout;
2163 }
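
/* Illustrative sketch (not part of the original file): the netlink flag
 * handling in tc_new_tfilter() above, reduced to two rules -- creating a
 * missing filter requires NLM_F_CREATE (else -ENOENT), and NLM_F_EXCL on
 * an existing filter yields -EEXIST. The flag values are redefined locally
 * for the demo and mirror <linux/netlink.h>.
 */
#if 0 /* illustration only */
#include <errno.h>
#include <stdio.h>

#define DEMO_NLM_F_EXCL   0x200
#define DEMO_NLM_F_CREATE 0x400

static int demo_new_filter(int exists, unsigned int flags)
{
	if (!exists && !(flags & DEMO_NLM_F_CREATE))
		return -ENOENT;	/* nothing to modify, create not allowed */
	if (exists && (flags & DEMO_NLM_F_EXCL))
		return -EEXIST;	/* exclusive create, already present */
	return 0;		/* create or replace */
}

int main(void)
{
	printf("%d\n", demo_new_filter(0, 0));                 /* -ENOENT */
	printf("%d\n", demo_new_filter(0, DEMO_NLM_F_CREATE)); /* 0 */
	printf("%d\n", demo_new_filter(1, DEMO_NLM_F_EXCL));   /* -EEXIST */
	return 0;
}
#endif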
2164
2165 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2166                           struct netlink_ext_ack *extack)
2167 {
2168         struct net *net = sock_net(skb->sk);
2169         struct nlattr *tca[TCA_MAX + 1];
2170         char name[IFNAMSIZ];
2171         struct tcmsg *t;
2172         u32 protocol;
2173         u32 prio;
2174         u32 parent;
2175         u32 chain_index;
2176         struct Qdisc *q = NULL;
2177         struct tcf_chain_info chain_info;
2178         struct tcf_chain *chain = NULL;
2179         struct tcf_block *block = NULL;
2180         struct tcf_proto *tp = NULL;
2181         unsigned long cl = 0;
2182         void *fh = NULL;
2183         int err;
2184         bool rtnl_held = false;
2185
2186         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2187                 return -EPERM;
2188
2189         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2190                                      rtm_tca_policy, extack);
2191         if (err < 0)
2192                 return err;
2193
2194         t = nlmsg_data(n);
2195         protocol = TC_H_MIN(t->tcm_info);
2196         prio = TC_H_MAJ(t->tcm_info);
2197         parent = t->tcm_parent;
2198
2199         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2200                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2201                 return -ENOENT;
2202         }
2203
2204         /* Find head of filter chain. */
2205
2206         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2207         if (err)
2208                 return err;
2209
2210         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2211                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2212                 err = -EINVAL;
2213                 goto errout;
2214         }
2215         /* Take the rtnl mutex if flushing the whole chain, the block is shared
2216          * (no qdisc found), the qdisc is not unlocked, the classifier type is
2217          * not specified, or the classifier is not unlocked.
2218          */
2219         if (!prio ||
2220             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2221             !tcf_proto_is_unlocked(name)) {
2222                 rtnl_held = true;
2223                 rtnl_lock();
2224         }
2225
2226         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2227         if (err)
2228                 goto errout;
2229
2230         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2231                                  extack);
2232         if (IS_ERR(block)) {
2233                 err = PTR_ERR(block);
2234                 goto errout;
2235         }
2236
2237         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2238         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2239                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2240                 err = -EINVAL;
2241                 goto errout;
2242         }
2243         chain = tcf_chain_get(block, chain_index, false);
2244         if (!chain) {
2245                 /* User requested flush on non-existent chain. Nothing to do,
2246                  * so just return success.
2247                  */
2248                 if (prio == 0) {
2249                         err = 0;
2250                         goto errout;
2251                 }
2252                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2253                 err = -ENOENT;
2254                 goto errout;
2255         }
2256
2257         if (prio == 0) {
2258                 tfilter_notify_chain(net, skb, block, q, parent, n,
2259                                      chain, RTM_DELTFILTER, rtnl_held);
2260                 tcf_chain_flush(chain, rtnl_held);
2261                 err = 0;
2262                 goto errout;
2263         }
2264
2265         mutex_lock(&chain->filter_chain_lock);
2266         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2267                                prio, false);
2268         if (!tp || IS_ERR(tp)) {
2269                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2270                 err = tp ? PTR_ERR(tp) : -ENOENT;
2271                 goto errout_locked;
2272         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2273                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2274                 err = -EINVAL;
2275                 goto errout_locked;
2276         } else if (t->tcm_handle == 0) {
2277                 tcf_proto_signal_destroying(chain, tp);
2278                 tcf_chain_tp_remove(chain, &chain_info, tp);
2279                 mutex_unlock(&chain->filter_chain_lock);
2280
2281                 tcf_proto_put(tp, rtnl_held, NULL);
2282                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2283                                RTM_DELTFILTER, false, rtnl_held);
2284                 err = 0;
2285                 goto errout;
2286         }
2287         mutex_unlock(&chain->filter_chain_lock);
2288
2289         fh = tp->ops->get(tp, t->tcm_handle);
2290
2291         if (!fh) {
2292                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2293                 err = -ENOENT;
2294         } else {
2295                 bool last;
2296
2297                 err = tfilter_del_notify(net, skb, n, tp, block,
2298                                          q, parent, fh, false, &last,
2299                                          rtnl_held, extack);
2300
2301                 if (err)
2302                         goto errout;
2303                 if (last)
2304                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2305         }
2306
2307 errout:
2308         if (chain) {
2309                 if (tp && !IS_ERR(tp))
2310                         tcf_proto_put(tp, rtnl_held, NULL);
2311                 tcf_chain_put(chain);
2312         }
2313         tcf_block_release(q, block, rtnl_held);
2314
2315         if (rtnl_held)
2316                 rtnl_unlock();
2317
2318         return err;
2319
2320 errout_locked:
2321         mutex_unlock(&chain->filter_chain_lock);
2322         goto errout;
2323 }
2324
2325 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2326                           struct netlink_ext_ack *extack)
2327 {
2328         struct net *net = sock_net(skb->sk);
2329         struct nlattr *tca[TCA_MAX + 1];
2330         char name[IFNAMSIZ];
2331         struct tcmsg *t;
2332         u32 protocol;
2333         u32 prio;
2334         u32 parent;
2335         u32 chain_index;
2336         struct Qdisc *q = NULL;
2337         struct tcf_chain_info chain_info;
2338         struct tcf_chain *chain = NULL;
2339         struct tcf_block *block = NULL;
2340         struct tcf_proto *tp = NULL;
2341         unsigned long cl = 0;
2342         void *fh = NULL;
2343         int err;
2344         bool rtnl_held = false;
2345
2346         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2347                                      rtm_tca_policy, extack);
2348         if (err < 0)
2349                 return err;
2350
2351         t = nlmsg_data(n);
2352         protocol = TC_H_MIN(t->tcm_info);
2353         prio = TC_H_MAJ(t->tcm_info);
2354         parent = t->tcm_parent;
2355
2356         if (prio == 0) {
2357                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2358                 return -ENOENT;
2359         }
2360
2361         /* Find head of filter chain. */
2362
2363         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2364         if (err)
2365                 return err;
2366
2367         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2368                 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2369                 err = -EINVAL;
2370                 goto errout;
2371         }
2372         /* Take the rtnl mutex if the block is shared (no qdisc found), the
2373          * qdisc is not unlocked, the classifier type is not specified, or the
2374          * classifier is not unlocked.
2375          */
2376         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2377             !tcf_proto_is_unlocked(name)) {
2378                 rtnl_held = true;
2379                 rtnl_lock();
2380         }
2381
2382         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2383         if (err)
2384                 goto errout;
2385
2386         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2387                                  extack);
2388         if (IS_ERR(block)) {
2389                 err = PTR_ERR(block);
2390                 goto errout;
2391         }
2392
2393         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2394         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2395                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2396                 err = -EINVAL;
2397                 goto errout;
2398         }
2399         chain = tcf_chain_get(block, chain_index, false);
2400         if (!chain) {
2401                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2402                 err = -EINVAL;
2403                 goto errout;
2404         }
2405
2406         mutex_lock(&chain->filter_chain_lock);
2407         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2408                                prio, false);
2409         mutex_unlock(&chain->filter_chain_lock);
2410         if (!tp || IS_ERR(tp)) {
2411                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2412                 err = tp ? PTR_ERR(tp) : -ENOENT;
2413                 goto errout;
2414         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2415                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2416                 err = -EINVAL;
2417                 goto errout;
2418         }
2419
2420         fh = tp->ops->get(tp, t->tcm_handle);
2421
2422         if (!fh) {
2423                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2424                 err = -ENOENT;
2425         } else {
2426                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2427                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2428                 if (err < 0)
2429                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2430         }
2431
2432         tfilter_put(tp, fh);
2433 errout:
2434         if (chain) {
2435                 if (tp && !IS_ERR(tp))
2436                         tcf_proto_put(tp, rtnl_held, NULL);
2437                 tcf_chain_put(chain);
2438         }
2439         tcf_block_release(q, block, rtnl_held);
2440
2441         if (rtnl_held)
2442                 rtnl_unlock();
2443
2444         return err;
2445 }
2446
2447 struct tcf_dump_args {
2448         struct tcf_walker w;
2449         struct sk_buff *skb;
2450         struct netlink_callback *cb;
2451         struct tcf_block *block;
2452         struct Qdisc *q;
2453         u32 parent;
2454         bool terse_dump;
2455 };
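
/* The walker is the first member of tcf_dump_args, so tcf_node_dump()
 * below can recover the containing structure from the struct tcf_walker
 * pointer with a plain cast.
 */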
2456
2457 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2458 {
2459         struct tcf_dump_args *a = (void *)arg;
2460         struct net *net = sock_net(a->skb->sk);
2461
2462         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2463                              n, NETLINK_CB(a->cb->skb).portid,
2464                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2465                              RTM_NEWTFILTER, a->terse_dump, true);
2466 }
2467
2468 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2469                            struct sk_buff *skb, struct netlink_callback *cb,
2470                            long index_start, long *p_index, bool terse)
2471 {
2472         struct net *net = sock_net(skb->sk);
2473         struct tcf_block *block = chain->block;
2474         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2475         struct tcf_proto *tp, *tp_prev;
2476         struct tcf_dump_args arg;
2477
2478         for (tp = __tcf_get_next_proto(chain, NULL);
2479              tp;
2480              tp_prev = tp,
2481                      tp = __tcf_get_next_proto(chain, tp),
2482                      tcf_proto_put(tp_prev, true, NULL),
2483                      (*p_index)++) {
2484                 if (*p_index < index_start)
2485                         continue;
2486                 if (TC_H_MAJ(tcm->tcm_info) &&
2487                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2488                         continue;
2489                 if (TC_H_MIN(tcm->tcm_info) &&
2490                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2491                         continue;
2492                 if (*p_index > index_start)
2493                         memset(&cb->args[1], 0,
2494                                sizeof(cb->args) - sizeof(cb->args[0]));
2495                 if (cb->args[1] == 0) {
2496                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2497                                           NETLINK_CB(cb->skb).portid,
2498                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2499                                           RTM_NEWTFILTER, false, true) <= 0)
2500                                 goto errout;
2501                         cb->args[1] = 1;
2502                 }
2503                 if (!tp->ops->walk)
2504                         continue;
2505                 arg.w.fn = tcf_node_dump;
2506                 arg.skb = skb;
2507                 arg.cb = cb;
2508                 arg.block = block;
2509                 arg.q = q;
2510                 arg.parent = parent;
2511                 arg.w.stop = 0;
2512                 arg.w.skip = cb->args[1] - 1;
2513                 arg.w.count = 0;
2514                 arg.w.cookie = cb->args[2];
2515                 arg.terse_dump = terse;
2516                 tp->ops->walk(tp, &arg.w, true);
2517                 cb->args[2] = arg.w.cookie;
2518                 cb->args[1] = arg.w.count + 1;
2519                 if (arg.w.stop)
2520                         goto errout;
2521         }
2522         return true;
2523
2524 errout:
2525         tcf_proto_put(tp, true, NULL);
2526         return false;
2527 }
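
/* Illustrative sketch (not part of the original file): the skip/count
 * resumption pattern that tcf_chain_dump() above drives through cb->args[]
 * and the walker. A dump that fills its buffer records how many entries it
 * emitted; the next invocation skips exactly that many before emitting
 * more. All names are hypothetical.
 */
#if 0 /* illustration only */
#include <stdio.h>

/* Emit up to budget entries starting at item[skip]; return count emitted. */
static int demo_dump(const int *item, int n, int skip, int budget)
{
	int emitted = 0;

	while (skip + emitted < n && emitted < budget) {
		printf("item %d\n", item[skip + emitted]);
		emitted++;
	}
	return emitted;
}

int main(void)
{
	const int item[] = { 10, 20, 30, 40, 50 };
	int skip = 0;

	/* Two "dump" invocations with room for three entries each. */
	skip += demo_dump(item, 5, skip, 3);	/* 10 20 30 */
	skip += demo_dump(item, 5, skip, 3);	/* 40 50 */
	return 0;
}
#endif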
2528
2529 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2530         [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2531 };
2532
2533 /* called with RTNL */
2534 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2535 {
2536         struct tcf_chain *chain, *chain_prev;
2537         struct net *net = sock_net(skb->sk);
2538         struct nlattr *tca[TCA_MAX + 1];
2539         struct Qdisc *q = NULL;
2540         struct tcf_block *block;
2541         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2542         bool terse_dump = false;
2543         long index_start;
2544         long index;
2545         u32 parent;
2546         int err;
2547
2548         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2549                 return skb->len;
2550
2551         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2552                                      tcf_tfilter_dump_policy, cb->extack);
2553         if (err)
2554                 return err;
2555
2556         if (tca[TCA_DUMP_FLAGS]) {
2557                 struct nla_bitfield32 flags =
2558                         nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2559
2560                 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2561         }
2562
2563         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2564                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2565                 if (!block)
2566                         goto out;
2567                 /* If we work with a block index, q is NULL and the parent value
2568                  * will never be used in the following code. The check
2569                  * in tcf_fill_node() prevents it. However, the compiler does not
2570                  * see that far, so set parent to zero to silence the warning
2571                  * about parent being uninitialized.
2572                  */
2573                 parent = 0;
2574         } else {
2575                 const struct Qdisc_class_ops *cops;
2576                 struct net_device *dev;
2577                 unsigned long cl = 0;
2578
2579                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2580                 if (!dev)
2581                         return skb->len;
2582
2583                 parent = tcm->tcm_parent;
2584                 if (!parent)
2585                         q = dev->qdisc;
2586                 else
2587                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2588                 if (!q)
2589                         goto out;
2590                 cops = q->ops->cl_ops;
2591                 if (!cops)
2592                         goto out;
2593                 if (!cops->tcf_block)
2594                         goto out;
2595                 if (TC_H_MIN(tcm->tcm_parent)) {
2596                         cl = cops->find(q, tcm->tcm_parent);
2597                         if (cl == 0)
2598                                 goto out;
2599                 }
2600                 block = cops->tcf_block(q, cl, NULL);
2601                 if (!block)
2602                         goto out;
2603                 parent = block->classid;
2604                 if (tcf_block_shared(block))
2605                         q = NULL;
2606         }
2607
2608         index_start = cb->args[0];
2609         index = 0;
2610
2611         for (chain = __tcf_get_next_chain(block, NULL);
2612              chain;
2613              chain_prev = chain,
2614                      chain = __tcf_get_next_chain(block, chain),
2615                      tcf_chain_put(chain_prev)) {
2616                 if (tca[TCA_CHAIN] &&
2617                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2618                         continue;
2619                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2620                                     index_start, &index, terse_dump)) {
2621                         tcf_chain_put(chain);
2622                         err = -EMSGSIZE;
2623                         break;
2624                 }
2625         }
2626
2627         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2628                 tcf_block_refcnt_put(block, true);
2629         cb->args[0] = index;
2630
2631 out:
2632         /* If we made no progress, the error (EMSGSIZE) is real */
2633         if (skb->len == 0 && err)
2634                 return err;
2635         return skb->len;
2636 }
2637
2638 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2639                               void *tmplt_priv, u32 chain_index,
2640                               struct net *net, struct sk_buff *skb,
2641                               struct tcf_block *block,
2642                               u32 portid, u32 seq, u16 flags, int event)
2643 {
2644         unsigned char *b = skb_tail_pointer(skb);
2645         const struct tcf_proto_ops *ops;
2646         struct nlmsghdr *nlh;
2647         struct tcmsg *tcm;
2648         void *priv;
2649
2650         ops = tmplt_ops;
2651         priv = tmplt_priv;
2652
2653         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2654         if (!nlh)
2655                 goto out_nlmsg_trim;
2656         tcm = nlmsg_data(nlh);
2657         tcm->tcm_family = AF_UNSPEC;
2658         tcm->tcm__pad1 = 0;
2659         tcm->tcm__pad2 = 0;
2660         tcm->tcm_handle = 0;
2661         if (block->q) {
2662                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2663                 tcm->tcm_parent = block->q->handle;
2664         } else {
2665                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2666                 tcm->tcm_block_index = block->index;
2667         }
2668
2669         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2670                 goto nla_put_failure;
2671
2672         if (ops) {
2673                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2674                         goto nla_put_failure;
2675                 if (ops->tmplt_dump(skb, net, priv) < 0)
2676                         goto nla_put_failure;
2677         }
2678
2679         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2680         return skb->len;
2681
2682 out_nlmsg_trim:
2683 nla_put_failure:
2684         nlmsg_trim(skb, b);
2685         return -EMSGSIZE;
2686 }
2687
2688 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2689                            u32 seq, u16 flags, int event, bool unicast)
2690 {
2691         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2692         struct tcf_block *block = chain->block;
2693         struct net *net = block->net;
2694         struct sk_buff *skb;
2695         int err = 0;
2696
2697         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2698         if (!skb)
2699                 return -ENOBUFS;
2700
2701         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2702                                chain->index, net, skb, block, portid,
2703                                seq, flags, event) <= 0) {
2704                 kfree_skb(skb);
2705                 return -EINVAL;
2706         }
2707
2708         if (unicast)
2709                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2710         else
2711                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2712                                      flags & NLM_F_ECHO);
2713
2714         if (err > 0)
2715                 err = 0;
2716         return err;
2717 }
2718
2719 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2720                                   void *tmplt_priv, u32 chain_index,
2721                                   struct tcf_block *block, struct sk_buff *oskb,
2722                                   u32 seq, u16 flags, bool unicast)
2723 {
2724         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2725         struct net *net = block->net;
2726         struct sk_buff *skb;
2727
2728         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2729         if (!skb)
2730                 return -ENOBUFS;
2731
2732         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2733                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2734                 kfree_skb(skb);
2735                 return -EINVAL;
2736         }
2737
2738         if (unicast)
2739                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2740
2741         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2742 }
2743
2744 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2745                               struct nlattr **tca,
2746                               struct netlink_ext_ack *extack)
2747 {
2748         const struct tcf_proto_ops *ops;
2749         char name[IFNAMSIZ];
2750         void *tmplt_priv;
2751
2752         /* If kind is not set, user did not specify template. */
2753         if (!tca[TCA_KIND])
2754                 return 0;
2755
2756         if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2757                 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2758                 return -EINVAL;
2759         }
2760
2761         ops = tcf_proto_lookup_ops(name, true, extack);
2762         if (IS_ERR(ops))
2763                 return PTR_ERR(ops);
2764         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2765                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2766                 return -EOPNOTSUPP;
2767         }
2768
2769         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2770         if (IS_ERR(tmplt_priv)) {
2771                 module_put(ops->owner);
2772                 return PTR_ERR(tmplt_priv);
2773         }
2774         chain->tmplt_ops = ops;
2775         chain->tmplt_priv = tmplt_priv;
2776         return 0;
2777 }
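
/* A chain template binds a classifier kind to a chain before any filter
 * exists on it; as checked above, the classifier must provide all of
 * tmplt_create, tmplt_destroy and tmplt_dump for templates to be usable.
 */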
2778
2779 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2780                                void *tmplt_priv)
2781 {
2782         /* If template ops are not set, there is no work to do for us. */
2783         if (!tmplt_ops)
2784                 return;
2785
2786         tmplt_ops->tmplt_destroy(tmplt_priv);
2787         module_put(tmplt_ops->owner);
2788 }
2789
2790 /* Add/delete/get a chain */
2791
2792 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2793                         struct netlink_ext_ack *extack)
2794 {
2795         struct net *net = sock_net(skb->sk);
2796         struct nlattr *tca[TCA_MAX + 1];
2797         struct tcmsg *t;
2798         u32 parent;
2799         u32 chain_index;
2800         struct Qdisc *q = NULL;
2801         struct tcf_chain *chain = NULL;
2802         struct tcf_block *block;
2803         unsigned long cl;
2804         int err;
2805
2806         if (n->nlmsg_type != RTM_GETCHAIN &&
2807             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2808                 return -EPERM;
2809
2810 replay:
2811         err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2812                                      rtm_tca_policy, extack);
2813         if (err < 0)
2814                 return err;
2815
2816         t = nlmsg_data(n);
2817         parent = t->tcm_parent;
2818         cl = 0;
2819
2820         block = tcf_block_find(net, &q, &parent, &cl,
2821                                t->tcm_ifindex, t->tcm_block_index, extack);
2822         if (IS_ERR(block))
2823                 return PTR_ERR(block);
2824
2825         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2826         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2827                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2828                 err = -EINVAL;
2829                 goto errout_block;
2830         }
2831
2832         mutex_lock(&block->lock);
2833         chain = tcf_chain_lookup(block, chain_index);
2834         if (n->nlmsg_type == RTM_NEWCHAIN) {
2835                 if (chain) {
2836                         if (tcf_chain_held_by_acts_only(chain)) {
2837                                 /* The chain exists only because there is
2838                                  * some action referencing it.
2839                                  */
2840                                 tcf_chain_hold(chain);
2841                         } else {
2842                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2843                                 err = -EEXIST;
2844                                 goto errout_block_locked;
2845                         }
2846                 } else {
2847                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2848                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2849                                 err = -ENOENT;
2850                                 goto errout_block_locked;
2851                         }
2852                         chain = tcf_chain_create(block, chain_index);
2853                         if (!chain) {
2854                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2855                                 err = -ENOMEM;
2856                                 goto errout_block_locked;
2857                         }
2858                 }
2859         } else {
2860                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2861                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2862                         err = -EINVAL;
2863                         goto errout_block_locked;
2864                 }
2865                 tcf_chain_hold(chain);
2866         }
2867
2868         if (n->nlmsg_type == RTM_NEWCHAIN) {
2869                 /* Modifying chain requires holding parent block lock. In case
2870                  * the chain was successfully added, take a reference to the
2871                  * chain. This ensures that an empty chain does not disappear at
2872                  * the end of this function.
2873                  */
2874                 tcf_chain_hold(chain);
2875                 chain->explicitly_created = true;
2876         }
2877         mutex_unlock(&block->lock);
2878
2879         switch (n->nlmsg_type) {
2880         case RTM_NEWCHAIN:
2881                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2882                 if (err) {
2883                         tcf_chain_put_explicitly_created(chain);
2884                         goto errout;
2885                 }
2886
2887                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2888                                 RTM_NEWCHAIN, false);
2889                 break;
2890         case RTM_DELCHAIN:
2891                 tfilter_notify_chain(net, skb, block, q, parent, n,
2892                                      chain, RTM_DELTFILTER, true);
2893                 /* Flush the chain first as the user requested chain removal. */
2894                 tcf_chain_flush(chain, true);
2895                 /* In case the chain was successfully deleted, put a reference
2896                  * to the chain previously taken during addition.
2897                  */
2898                 tcf_chain_put_explicitly_created(chain);
2899                 break;
2900         case RTM_GETCHAIN:
2901                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2902                                       n->nlmsg_flags, n->nlmsg_type, true);
2903                 if (err < 0)
2904                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2905                 break;
2906         default:
2907                 err = -EOPNOTSUPP;
2908                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2909                 goto errout;
2910         }
2911
2912 errout:
2913         tcf_chain_put(chain);
2914 errout_block:
2915         tcf_block_release(q, block, true);
2916         if (err == -EAGAIN)
2917                 /* Replay the request. */
2918                 goto replay;
2919         return err;
2920
2921 errout_block_locked:
2922         mutex_unlock(&block->lock);
2923         goto errout_block;
2924 }
2925
2926 /* called with RTNL */
2927 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2928 {
2929         struct net *net = sock_net(skb->sk);
2930         struct nlattr *tca[TCA_MAX + 1];
2931         struct Qdisc *q = NULL;
2932         struct tcf_block *block;
2933         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2934         struct tcf_chain *chain;
2935         long index_start;
2936         long index;
2937         u32 parent;
2938         int err;
2939
2940         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2941                 return skb->len;
2942
2943         err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2944                                      rtm_tca_policy, cb->extack);
2945         if (err)
2946                 return err;
2947
2948         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2949                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2950                 if (!block)
2951                         goto out;
2952                 /* If we work with a block index, q is NULL and the parent value
2953                  * will never be used in the following code. The check
2954                  * in tcf_fill_node() prevents it. However, the compiler does not
2955                  * see that far, so set parent to zero to silence the warning
2956                  * about parent being uninitialized.
2957                  */
2958                 parent = 0;
2959         } else {
2960                 const struct Qdisc_class_ops *cops;
2961                 struct net_device *dev;
2962                 unsigned long cl = 0;
2963
2964                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2965                 if (!dev)
2966                         return skb->len;
2967
2968                 parent = tcm->tcm_parent;
2969                 if (!parent) {
2970                         q = dev->qdisc;
2971                         parent = q->handle;
2972                 } else {
2973                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2974                 }
2975                 if (!q)
2976                         goto out;
2977                 cops = q->ops->cl_ops;
2978                 if (!cops)
2979                         goto out;
2980                 if (!cops->tcf_block)
2981                         goto out;
2982                 if (TC_H_MIN(tcm->tcm_parent)) {
2983                         cl = cops->find(q, tcm->tcm_parent);
2984                         if (cl == 0)
2985                                 goto out;
2986                 }
2987                 block = cops->tcf_block(q, cl, NULL);
2988                 if (!block)
2989                         goto out;
2990                 if (tcf_block_shared(block))
2991                         q = NULL;
2992         }
2993
2994         index_start = cb->args[0];
2995         index = 0;
2996
2997         mutex_lock(&block->lock);
2998         list_for_each_entry(chain, &block->chain_list, list) {
2999                 if (tca[TCA_CHAIN] &&
3000                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
3001                         continue;
3002                 if (index < index_start) {
3003                         index++;
3004                         continue;
3005                 }
3006                 if (tcf_chain_held_by_acts_only(chain))
3007                         continue;
3008                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3009                                          chain->index, net, skb, block,
3010                                          NETLINK_CB(cb->skb).portid,
3011                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3012                                          RTM_NEWCHAIN);
3013                 if (err <= 0)
3014                         break;
3015                 index++;
3016         }
3017         mutex_unlock(&block->lock);
3018
3019         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3020                 tcf_block_refcnt_put(block, true);
3021         cb->args[0] = index;
3022
3023 out:
3024         /* If we made no progress, the error (EMSGSIZE) is real */
3025         if (skb->len == 0 && err)
3026                 return err;
3027         return skb->len;
3028 }
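
/* Note: this is the dump backend for RTM_GETCHAIN, registered in
 * tc_filter_init() at the bottom of this file; iproute2's "tc chain show"
 * walks the chains through it, one RTM_NEWCHAIN message per chain.
 */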
3029
3030 void tcf_exts_destroy(struct tcf_exts *exts)
3031 {
3032 #ifdef CONFIG_NET_CLS_ACT
3033         if (exts->actions) {
3034                 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3035                 kfree(exts->actions);
3036         }
3037         exts->nr_actions = 0;
3038 #endif
3039 }
3040 EXPORT_SYMBOL(tcf_exts_destroy);
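
/* Usage sketch (illustrative, not part of this file): a classifier embedding
 * a struct tcf_exts is expected to call tcf_exts_destroy() from its teardown
 * path so that bound actions are released; the "foo" names are hypothetical:
 *
 *        static void foo_destroy_filter(struct foo_filter *f)
 *        {
 *                tcf_exts_destroy(&f->exts);
 *                kfree(f);
 *        }
 */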
3041
3042 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3043                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3044                       bool rtnl_held, struct netlink_ext_ack *extack)
3045 {
3046 #ifdef CONFIG_NET_CLS_ACT
3047         {
3048                 struct tc_action *act;
3049                 size_t attr_size = 0;
3050
3051                 if (exts->police && tb[exts->police]) {
3052                         act = tcf_action_init_1(net, tp, tb[exts->police],
3053                                                 rate_tlv, "police", ovr,
3054                                                 TCA_ACT_BIND, rtnl_held,
3055                                                 extack);
3056                         if (IS_ERR(act))
3057                                 return PTR_ERR(act);
3058
3059                         act->type = exts->type = TCA_OLD_COMPAT;
3060                         exts->actions[0] = act;
3061                         exts->nr_actions = 1;
3062                 } else if (exts->action && tb[exts->action]) {
3063                         int err;
3064
3065                         err = tcf_action_init(net, tp, tb[exts->action],
3066                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
3067                                               exts->actions, &attr_size,
3068                                               rtnl_held, extack);
3069                         if (err < 0)
3070                                 return err;
3071                         exts->nr_actions = err;
3072                 }
3073         }
3074 #else
3075         if ((exts->action && tb[exts->action]) ||
3076             (exts->police && tb[exts->police])) {
3077                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3078                 return -EOPNOTSUPP;
3079         }
3080 #endif
3081
3082         return 0;
3083 }
3084 EXPORT_SYMBOL(tcf_exts_validate);
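
/* Usage sketch (illustrative): a classifier's ->change() callback typically
 * initializes the exts with its own attribute ids and then binds actions via
 * tcf_exts_validate(); TCA_FOO_*, "f" and "est" (the caller's TCA_RATE
 * attribute) are assumptions for the example:
 *
 *        err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *        if (err < 0)
 *                return err;
 *        err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *                                rtnl_held, extack);
 *        if (err < 0)
 *                goto errout;
 */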
3085
3086 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3087 {
3088 #ifdef CONFIG_NET_CLS_ACT
3089         struct tcf_exts old = *dst;
3090
3091         *dst = *src;
3092         tcf_exts_destroy(&old);
3093 #endif
3094 }
3095 EXPORT_SYMBOL(tcf_exts_change);
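
/* Usage sketch (illustrative): tcf_exts_change() is the commit half of a
 * validate-then-swap update; the caller builds a scratch tcf_exts first and
 * is responsible for serializing writers itself (e.g. under tp->lock or
 * rtnl), since the old actions are only released after dst is updated:
 *
 *        struct tcf_exts e;
 *
 *        ...tcf_exts_init() + tcf_exts_validate() into "e"...
 *        tcf_exts_change(&f->exts, &e);
 */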
3096
3097 #ifdef CONFIG_NET_CLS_ACT
3098 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3099 {
3100         if (exts->nr_actions == 0)
3101                 return NULL;
3102         else
3103                 return exts->actions[0];
3104 }
3105 #endif
3106
3107 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3108 {
3109 #ifdef CONFIG_NET_CLS_ACT
3110         struct nlattr *nest;
3111
3112         if (exts->action && tcf_exts_has_actions(exts)) {
3113                 /*
3114                  * Again, for backward-compatible mode: we want to work
3115                  * with both the old and the new way of entering tc data,
3116                  * even if iproute2 is newer - jhs
3117                  */
3118                 if (exts->type != TCA_OLD_COMPAT) {
3119                         nest = nla_nest_start_noflag(skb, exts->action);
3120                         if (nest == NULL)
3121                                 goto nla_put_failure;
3122
3123                         if (tcf_action_dump(skb, exts->actions, 0, 0,
3124                                             false) < 0)
3125                                 goto nla_put_failure;
3126                         nla_nest_end(skb, nest);
3127                 } else if (exts->police) {
3128                         struct tc_action *act = tcf_exts_first_act(exts);

3129                         nest = nla_nest_start_noflag(skb, exts->police);
3130                         if (nest == NULL || !act)
3131                                 goto nla_put_failure;
3132                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3133                                 goto nla_put_failure;
3134                         nla_nest_end(skb, nest);
3135                 }
3136         }
3137         return 0;
3138
3139 nla_put_failure:
3140         nla_nest_cancel(skb, nest);
3141         return -1;
3142 #else
3143         return 0;
3144 #endif
3145 }
3146 EXPORT_SYMBOL(tcf_exts_dump);
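
/* Usage sketch (illustrative): in a classifier's ->dump() callback the
 * actions are emitted inside the options nest, while the action stats go
 * after the nest is closed:
 *
 *        if (tcf_exts_dump(skb, &f->exts) < 0)
 *                goto nla_put_failure;
 *        nla_nest_end(skb, nest);
 *        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *                goto nla_put_failure;
 */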
3147
3148 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3149 {
3150 #ifdef CONFIG_NET_CLS_ACT
3151         struct nlattr *nest;
3152
3153         if (!exts->action || !tcf_exts_has_actions(exts))
3154                 return 0;
3155
3156         nest = nla_nest_start_noflag(skb, exts->action);
3157         if (!nest)
3158                 goto nla_put_failure;
3159
3160         if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3161                 goto nla_put_failure;
3162         nla_nest_end(skb, nest);
3163         return 0;
3164
3165 nla_put_failure:
3166         nla_nest_cancel(skb, nest);
3167         return -1;
3168 #else
3169         return 0;
3170 #endif
3171 }
3172 EXPORT_SYMBOL(tcf_exts_terse_dump);
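
/* Note: this is the reduced variant used for terse filter dumps; it passes
 * the terse flag down to tcf_action_dump() so only a minimal subset of each
 * action's attributes is emitted, which keeps large dumps cheap.
 */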
3173
3174 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3175 {
3176 #ifdef CONFIG_NET_CLS_ACT
3177         struct tc_action *a = tcf_exts_first_act(exts);

3178         if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3179                 return -1;
3180 #endif
3181         return 0;
3182 }
3183 EXPORT_SYMBOL(tcf_exts_dump_stats);
3184
3185 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3186 {
3187         if (*flags & TCA_CLS_FLAGS_IN_HW)
3188                 return;
3189         *flags |= TCA_CLS_FLAGS_IN_HW;
3190         atomic_inc(&block->offloadcnt);
3191 }
3192
3193 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3194 {
3195         if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3196                 return;
3197         *flags &= ~TCA_CLS_FLAGS_IN_HW;
3198         atomic_dec(&block->offloadcnt);
3199 }
3200
3201 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3202                                       struct tcf_proto *tp, u32 *cnt,
3203                                       u32 *flags, u32 diff, bool add)
3204 {
3205         lockdep_assert_held(&block->cb_lock);
3206
3207         spin_lock(&tp->lock);
3208         if (add) {
3209                 if (!*cnt)
3210                         tcf_block_offload_inc(block, flags);
3211                 *cnt += diff;
3212         } else {
3213                 *cnt -= diff;
3214                 if (!*cnt)
3215                         tcf_block_offload_dec(block, flags);
3216         }
3217         spin_unlock(&tp->lock);
3218 }
3219
3220 static void
3221 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3222                          u32 *cnt, u32 *flags)
3223 {
3224         lockdep_assert_held(&block->cb_lock);
3225
3226         spin_lock(&tp->lock);
3227         tcf_block_offload_dec(block, flags);
3228         *cnt = 0;
3229         spin_unlock(&tp->lock);
3230 }
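
/* Note on the helpers above: the per-filter counter (in_hw_count) tracks how
 * many block callbacks accepted the filter, TCA_CLS_FLAGS_IN_HW mirrors
 * "counter != 0", and block->offloadcnt counts filters carrying that flag,
 * so it only moves when a filter's hardware count crosses zero.
 */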
3231
3232 static int
3233 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3234                    void *type_data, bool err_stop)
3235 {
3236         struct flow_block_cb *block_cb;
3237         int ok_count = 0;
3238         int err;
3239
3240         list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3241                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3242                 if (err) {
3243                         if (err_stop)
3244                                 return err;
3245                 } else {
3246                         ok_count++;
3247                 }
3248         }
3249         return ok_count;
3250 }
3251
3252 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3253                      void *type_data, bool err_stop, bool rtnl_held)
3254 {
3255         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3256         int ok_count;
3257
3258 retry:
3259         if (take_rtnl)
3260                 rtnl_lock();
3261         down_read(&block->cb_lock);
3262         /* We need to take the rtnl lock if the block is bound to devs that
3263          * require it. In the block bind code, cb_lock is taken while holding
3264          * rtnl, so we must take the locks in the same order here.
3265          */
3266         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3267                 up_read(&block->cb_lock);
3268                 take_rtnl = true;
3269                 goto retry;
3270         }
3271
3272         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3273
3274         up_read(&block->cb_lock);
3275         if (take_rtnl)
3276                 rtnl_unlock();
3277         return ok_count;
3278 }
3279 EXPORT_SYMBOL(tc_setup_cb_call);
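
/* Usage sketch (illustrative): tc_setup_cb_call() broadcasts to all block
 * callbacks without touching offload accounting, which suits calls that do
 * not change what is offloaded, e.g. a stats query; TC_SETUP_CLSFOO and the
 * offload struct are hypothetical:
 *
 *        struct tc_foo_offload cls_foo = { .command = FOO_STATS };
 *
 *        tc_setup_cb_call(block, TC_SETUP_CLSFOO, &cls_foo, false, rtnl_held);
 */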
3280
3281 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3282  * successfully offloaded, increment the block's offload counter. On failure,
3283  * the previously offloaded filter is considered intact and the offload
3284  * counter is not decremented.
3285  */
3286
3287 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3288                     enum tc_setup_type type, void *type_data, bool err_stop,
3289                     u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3290 {
3291         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3292         int ok_count;
3293
3294 retry:
3295         if (take_rtnl)
3296                 rtnl_lock();
3297         down_read(&block->cb_lock);
3298         /* We need to take the rtnl lock if the block is bound to devs that
3299          * require it. In the block bind code, cb_lock is taken while holding
3300          * rtnl, so we must take the locks in the same order here.
3301          */
3302         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3303                 up_read(&block->cb_lock);
3304                 take_rtnl = true;
3305                 goto retry;
3306         }
3307
3308         /* Make sure all netdevs sharing this block are offload-capable. */
3309         if (block->nooffloaddevcnt && err_stop) {
3310                 ok_count = -EOPNOTSUPP;
3311                 goto err_unlock;
3312         }
3313
3314         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3315         if (ok_count < 0)
3316                 goto err_unlock;
3317
3318         if (tp->ops->hw_add)
3319                 tp->ops->hw_add(tp, type_data);
3320         if (ok_count > 0)
3321                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3322                                           ok_count, true);
3323 err_unlock:
3324         up_read(&block->cb_lock);
3325         if (take_rtnl)
3326                 rtnl_unlock();
3327         return ok_count < 0 ? ok_count : 0;
3328 }
3329 EXPORT_SYMBOL(tc_setup_cb_add);
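
/* Usage sketch (illustrative): a hardware-capable classifier offloads a
 * freshly validated filter roughly like this, treating callback errors as
 * fatal only when the user requested skip_sw ("foo" names hypothetical):
 *
 *        bool skip_sw = tc_skip_sw(f->flags);
 *
 *        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo, skip_sw,
 *                              &f->flags, &f->in_hw_count, rtnl_held);
 *        if (err < 0)
 *                goto errout;
 *        if (skip_sw && !tc_in_hw(f->flags))
 *                goto errout_not_offloaded;
 */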
3330
3331 /* Destructive filter replace. If a filter that wasn't already in hardware is
3332  * successfully offloaded, increment the block's offload counter. On failure,
3333  * the previously offloaded filter is considered destroyed and the offload
3334  * counter is decremented.
3335  */
3336
3337 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3338                         enum tc_setup_type type, void *type_data, bool err_stop,
3339                         u32 *old_flags, unsigned int *old_in_hw_count,
3340                         u32 *new_flags, unsigned int *new_in_hw_count,
3341                         bool rtnl_held)
3342 {
3343         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3344         int ok_count;
3345
3346 retry:
3347         if (take_rtnl)
3348                 rtnl_lock();
3349         down_read(&block->cb_lock);
3350         /* We need to take the rtnl lock if the block is bound to devs that
3351          * require it. In the block bind code, cb_lock is taken while holding
3352          * rtnl, so we must take the locks in the same order here.
3353          */
3354         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3355                 up_read(&block->cb_lock);
3356                 take_rtnl = true;
3357                 goto retry;
3358         }
3359
3360         /* Make sure all netdevs sharing this block are offload-capable. */
3361         if (block->nooffloaddevcnt && err_stop) {
3362                 ok_count = -EOPNOTSUPP;
3363                 goto err_unlock;
3364         }
3365
3366         tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3367         if (tp->ops->hw_del)
3368                 tp->ops->hw_del(tp, type_data);
3369
3370         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3371         if (ok_count < 0)
3372                 goto err_unlock;
3373
3374         if (tp->ops->hw_add)
3375                 tp->ops->hw_add(tp, type_data);
3376         if (ok_count > 0)
3377                 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3378                                           new_flags, ok_count, true);
3379 err_unlock:
3380         up_read(&block->cb_lock);
3381         if (take_rtnl)
3382                 rtnl_unlock();
3383         return ok_count < 0 ? ok_count : 0;
3384 }
3385 EXPORT_SYMBOL(tc_setup_cb_replace);
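
/* Note: unlike tc_setup_cb_add(), the replace path above first forgets the
 * old filter (counter reset plus ->hw_del) and only then installs the new
 * one, so a callback failure leaves neither version accounted as offloaded;
 * that is what "destructive" means in the comment above it.
 */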
3386
3387 /* Destroy the filter and decrement the block's offload counter if the filter
3388  * was previously offloaded.
3389  */
3390
3391 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3392                         enum tc_setup_type type, void *type_data, bool err_stop,
3393                         u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3394 {
3395         bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3396         int ok_count;
3397
3398 retry:
3399         if (take_rtnl)
3400                 rtnl_lock();
3401         down_read(&block->cb_lock);
3402         /* We need to take the rtnl lock if the block is bound to devs that
3403          * require it. In the block bind code, cb_lock is taken while holding
3404          * rtnl, so we must take the locks in the same order here.
3405          */
3406         if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3407                 up_read(&block->cb_lock);
3408                 take_rtnl = true;
3409                 goto retry;
3410         }
3411
3412         ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3413
3414         tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3415         if (tp->ops->hw_del)
3416                 tp->ops->hw_del(tp, type_data);
3417
3418         up_read(&block->cb_lock);
3419         if (take_rtnl)
3420                 rtnl_unlock();
3421         return ok_count < 0 ? ok_count : 0;
3422 }
3423 EXPORT_SYMBOL(tc_setup_cb_destroy);
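
/* Usage sketch (illustrative): teardown mirrors the add path; the return
 * value is commonly ignored because the software filter is going away
 * regardless of whether the drivers accepted the removal:
 *
 *        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFOO, &cls_foo, false,
 *                            &f->flags, &f->in_hw_count, rtnl_held);
 */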
3424
3425 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3426                           bool add, flow_setup_cb_t *cb,
3427                           enum tc_setup_type type, void *type_data,
3428                           void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3429 {
3430         int err = cb(type, type_data, cb_priv);
3431
3432         if (err) {
3433                 if (add && tc_skip_sw(*flags))
3434                         return err;
3435         } else {
3436                 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3437                                           add);
3438         }
3439
3440         return 0;
3441 }
3442 EXPORT_SYMBOL(tc_setup_cb_reoffload);
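
/* Usage sketch (illustrative): a classifier's ->reoffload() callback walks
 * its filters and replays each one to the single callback being added or
 * removed; the walk and the offload struct are classifier-specific ("foo"
 * names hypothetical):
 *
 *        list_for_each_entry(f, &head->filters, list) {
 *                ...fill cls_foo from "f", command add ? ADD : DESTROY...
 *                err = tc_setup_cb_reoffload(block, tp, add, cb,
 *                                            TC_SETUP_CLSFOO, &cls_foo,
 *                                            cb_priv, &f->flags,
 *                                            &f->in_hw_count);
 *                if (err)
 *                        return err;
 *        }
 */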
3443
3444 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3445                               const struct tc_action *act)
3446 {
3447         struct tc_cookie *cookie;
3448         int err = 0;
3449
3450         rcu_read_lock();
3451         cookie = rcu_dereference(act->act_cookie);
3452         if (cookie) {
3453                 entry->cookie = flow_action_cookie_create(cookie->data,
3454                                                           cookie->len,
3455                                                           GFP_ATOMIC);
3456                 if (!entry->cookie)
3457                         err = -ENOMEM;
3458         }
3459         rcu_read_unlock();
3460         return err;
3461 }
3462
3463 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3464 {
3465         flow_action_cookie_destroy(entry->cookie);
3466 }
3467
3468 void tc_cleanup_flow_action(struct flow_action *flow_action)
3469 {
3470         struct flow_action_entry *entry;
3471         int i;
3472
3473         flow_action_for_each(i, entry, flow_action) {
3474                 tcf_act_put_cookie(entry);
3475                 if (entry->destructor)
3476                         entry->destructor(entry->destructor_priv);
3477         }
3478 }
3479 EXPORT_SYMBOL(tc_cleanup_flow_action);
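
/* Note: the destructors invoked above release the references taken while the
 * table was built in tc_setup_flow_action() below: netdev refs for mirred,
 * tunnel info copies, psample group refs and gate entry lists.
 */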
3480
3481 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3482                                const struct tc_action *act)
3483 {
3484 #ifdef CONFIG_NET_CLS_ACT
3485         entry->dev = act->ops->get_dev(act, &entry->destructor);
3486         if (!entry->dev)
3487                 return;
3488         entry->destructor_priv = entry->dev;
3489 #endif
3490 }
3491
3492 static void tcf_tunnel_encap_put_tunnel(void *priv)
3493 {
3494         struct ip_tunnel_info *tunnel = priv;
3495
3496         kfree(tunnel);
3497 }
3498
3499 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3500                                        const struct tc_action *act)
3501 {
3502         entry->tunnel = tcf_tunnel_info_copy(act);
3503         if (!entry->tunnel)
3504                 return -ENOMEM;
3505         entry->destructor = tcf_tunnel_encap_put_tunnel;
3506         entry->destructor_priv = entry->tunnel;
3507         return 0;
3508 }
3509
3510 static void tcf_sample_get_group(struct flow_action_entry *entry,
3511                                  const struct tc_action *act)
3512 {
3513 #ifdef CONFIG_NET_CLS_ACT
3514         entry->sample.psample_group =
3515                 act->ops->get_psample_group(act, &entry->destructor);
3516         entry->destructor_priv = entry->sample.psample_group;
3517 #endif
3518 }
3519
3520 static void tcf_gate_entry_destructor(void *priv)
3521 {
3522         struct action_gate_entry *oe = priv;
3523
3524         kfree(oe);
3525 }
3526
3527 static int tcf_gate_get_entries(struct flow_action_entry *entry,
3528                                 const struct tc_action *act)
3529 {
3530         entry->gate.entries = tcf_gate_get_list(act);
3531
3532         if (!entry->gate.entries)
3533                 return -EINVAL;
3534
3535         entry->destructor = tcf_gate_entry_destructor;
3536         entry->destructor_priv = entry->gate.entries;
3537
3538         return 0;
3539 }
3540
3541 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3542 {
3543         if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3544                 return FLOW_ACTION_HW_STATS_DONT_CARE;
3545         else if (!hw_stats)
3546                 return FLOW_ACTION_HW_STATS_DISABLED;
3547
3548         return hw_stats;
3549 }
3550
3551 int tc_setup_flow_action(struct flow_action *flow_action,
3552                          const struct tcf_exts *exts)
3553 {
3554         struct tc_action *act;
3555         int i, j, k, err = 0;
3556
3557         BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3558         BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3559         BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3560
3561         if (!exts)
3562                 return 0;
3563
3564         j = 0;
3565         tcf_exts_for_each_action(i, act, exts) {
3566                 struct flow_action_entry *entry;
3567
3568                 entry = &flow_action->entries[j];
3569                 spin_lock_bh(&act->tcfa_lock);
3570                 err = tcf_act_get_cookie(entry, act);
3571                 if (err)
3572                         goto err_out_locked;
3573
3574                 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3575
3576                 if (is_tcf_gact_ok(act)) {
3577                         entry->id = FLOW_ACTION_ACCEPT;
3578                 } else if (is_tcf_gact_shot(act)) {
3579                         entry->id = FLOW_ACTION_DROP;
3580                 } else if (is_tcf_gact_trap(act)) {
3581                         entry->id = FLOW_ACTION_TRAP;
3582                 } else if (is_tcf_gact_goto_chain(act)) {
3583                         entry->id = FLOW_ACTION_GOTO;
3584                         entry->chain_index = tcf_gact_goto_chain_index(act);
3585                 } else if (is_tcf_mirred_egress_redirect(act)) {
3586                         entry->id = FLOW_ACTION_REDIRECT;
3587                         tcf_mirred_get_dev(entry, act);
3588                 } else if (is_tcf_mirred_egress_mirror(act)) {
3589                         entry->id = FLOW_ACTION_MIRRED;
3590                         tcf_mirred_get_dev(entry, act);
3591                 } else if (is_tcf_mirred_ingress_redirect(act)) {
3592                         entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3593                         tcf_mirred_get_dev(entry, act);
3594                 } else if (is_tcf_mirred_ingress_mirror(act)) {
3595                         entry->id = FLOW_ACTION_MIRRED_INGRESS;
3596                         tcf_mirred_get_dev(entry, act);
3597                 } else if (is_tcf_vlan(act)) {
3598                         switch (tcf_vlan_action(act)) {
3599                         case TCA_VLAN_ACT_PUSH:
3600                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3601                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3602                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3603                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3604                                 break;
3605                         case TCA_VLAN_ACT_POP:
3606                                 entry->id = FLOW_ACTION_VLAN_POP;
3607                                 break;
3608                         case TCA_VLAN_ACT_MODIFY:
3609                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3610                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3611                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3612                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3613                                 break;
3614                         default:
3615                                 err = -EOPNOTSUPP;
3616                                 goto err_out_locked;
3617                         }
3618                 } else if (is_tcf_tunnel_set(act)) {
3619                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3620                         err = tcf_tunnel_encap_get_tunnel(entry, act);
3621                         if (err)
3622                                 goto err_out_locked;
3623                 } else if (is_tcf_tunnel_release(act)) {
3624                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3625                 } else if (is_tcf_pedit(act)) {
3626                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3627                                 switch (tcf_pedit_cmd(act, k)) {
3628                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3629                                         entry->id = FLOW_ACTION_MANGLE;
3630                                         break;
3631                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3632                                         entry->id = FLOW_ACTION_ADD;
3633                                         break;
3634                                 default:
3635                                         err = -EOPNOTSUPP;
3636                                         goto err_out_locked;
3637                                 }
3638                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3639                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3640                                 entry->mangle.val = tcf_pedit_val(act, k);
3641                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3642                                 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3643                                 entry = &flow_action->entries[++j];
3644                         }
3645                 } else if (is_tcf_csum(act)) {
3646                         entry->id = FLOW_ACTION_CSUM;
3647                         entry->csum_flags = tcf_csum_update_flags(act);
3648                 } else if (is_tcf_skbedit_mark(act)) {
3649                         entry->id = FLOW_ACTION_MARK;
3650                         entry->mark = tcf_skbedit_mark(act);
3651                 } else if (is_tcf_sample(act)) {
3652                         entry->id = FLOW_ACTION_SAMPLE;
3653                         entry->sample.trunc_size = tcf_sample_trunc_size(act);
3654                         entry->sample.truncate = tcf_sample_truncate(act);
3655                         entry->sample.rate = tcf_sample_rate(act);
3656                         tcf_sample_get_group(entry, act);
3657                 } else if (is_tcf_police(act)) {
3658                         entry->id = FLOW_ACTION_POLICE;
3659                         entry->police.burst = tcf_police_tcfp_burst(act);
3660                         entry->police.rate_bytes_ps =
3661                                 tcf_police_rate_bytes_ps(act);
3662                 } else if (is_tcf_ct(act)) {
3663                         entry->id = FLOW_ACTION_CT;
3664                         entry->ct.action = tcf_ct_action(act);
3665                         entry->ct.zone = tcf_ct_zone(act);
3666                         entry->ct.flow_table = tcf_ct_ft(act);
3667                 } else if (is_tcf_mpls(act)) {
3668                         switch (tcf_mpls_action(act)) {
3669                         case TCA_MPLS_ACT_PUSH:
3670                                 entry->id = FLOW_ACTION_MPLS_PUSH;
3671                                 entry->mpls_push.proto = tcf_mpls_proto(act);
3672                                 entry->mpls_push.label = tcf_mpls_label(act);
3673                                 entry->mpls_push.tc = tcf_mpls_tc(act);
3674                                 entry->mpls_push.bos = tcf_mpls_bos(act);
3675                                 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3676                                 break;
3677                         case TCA_MPLS_ACT_POP:
3678                                 entry->id = FLOW_ACTION_MPLS_POP;
3679                                 entry->mpls_pop.proto = tcf_mpls_proto(act);
3680                                 break;
3681                         case TCA_MPLS_ACT_MODIFY:
3682                                 entry->id = FLOW_ACTION_MPLS_MANGLE;
3683                                 entry->mpls_mangle.label = tcf_mpls_label(act);
3684                                 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3685                                 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3686                                 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3687                                 break;
3688                         default:
                                     err = -EOPNOTSUPP;
3689                                 goto err_out_locked;
3690                         }
3691                 } else if (is_tcf_skbedit_ptype(act)) {
3692                         entry->id = FLOW_ACTION_PTYPE;
3693                         entry->ptype = tcf_skbedit_ptype(act);
3694                 } else if (is_tcf_skbedit_priority(act)) {
3695                         entry->id = FLOW_ACTION_PRIORITY;
3696                         entry->priority = tcf_skbedit_priority(act);
3697                 } else if (is_tcf_gate(act)) {
3698                         entry->id = FLOW_ACTION_GATE;
3699                         entry->gate.index = tcf_gate_index(act);
3700                         entry->gate.prio = tcf_gate_prio(act);
3701                         entry->gate.basetime = tcf_gate_basetime(act);
3702                         entry->gate.cycletime = tcf_gate_cycletime(act);
3703                         entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3704                         entry->gate.num_entries = tcf_gate_num_entries(act);
3705                         err = tcf_gate_get_entries(entry, act);
3706                         if (err)
3707                                 goto err_out_locked;
3708                 } else {
3709                         err = -EOPNOTSUPP;
3710                         goto err_out_locked;
3711                 }
3712                 spin_unlock_bh(&act->tcfa_lock);
3713
3714                 if (!is_tcf_pedit(act))
3715                         j++;
3716         }
3717
3718 err_out:
3719         if (err)
3720                 tc_cleanup_flow_action(flow_action);
3721
3722         return err;
3723 err_out_locked:
3724         spin_unlock_bh(&act->tcfa_lock);
3725         goto err_out;
3726 }
3727 EXPORT_SYMBOL(tc_setup_flow_action);
3728
3729 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3730 {
3731         unsigned int num_acts = 0;
3732         struct tc_action *act;
3733         int i;
3734
3735         tcf_exts_for_each_action(i, act, exts) {
3736                 if (is_tcf_pedit(act))
3737                         num_acts += tcf_pedit_nkeys(act);
3738                 else
3739                         num_acts++;
3740         }
3741         return num_acts;
3742 }
3743 EXPORT_SYMBOL(tcf_exts_num_actions);
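
/* Usage sketch (illustrative): callers size the flow_action table with
 * tcf_exts_num_actions() (pedit contributes one entry per key) and then let
 * tc_setup_flow_action() fill it; flow_rule_alloc() is the allocator from
 * net/core/flow_offload.c:
 *
 *        struct flow_rule *rule;
 *
 *        rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *        if (!rule)
 *                return -ENOMEM;
 *        err = tc_setup_flow_action(&rule->action, exts);
 *        if (err) {
 *                kfree(rule);
 *                return err;
 *        }
 *
 * On failure tc_setup_flow_action() has already run tc_cleanup_flow_action()
 * internally, so the caller only frees the rule itself.
 */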
3744
3745 static __net_init int tcf_net_init(struct net *net)
3746 {
3747         struct tcf_net *tn = net_generic(net, tcf_net_id);
3748
3749         spin_lock_init(&tn->idr_lock);
3750         idr_init(&tn->idr);
3751         return 0;
3752 }
3753
3754 static void __net_exit tcf_net_exit(struct net *net)
3755 {
3756         struct tcf_net *tn = net_generic(net, tcf_net_id);
3757
3758         idr_destroy(&tn->idr);
3759 }
3760
3761 static struct pernet_operations tcf_net_ops = {
3762         .init = tcf_net_init,
3763         .exit = tcf_net_exit,
3764         .id   = &tcf_net_id,
3765         .size = sizeof(struct tcf_net),
3766 };
3767
3768 static int __init tc_filter_init(void)
3769 {
3770         int err;
3771
3772         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3773         if (!tc_filter_wq)
3774                 return -ENOMEM;
3775
3776         err = register_pernet_subsys(&tcf_net_ops);
3777         if (err)
3778                 goto err_register_pernet_subsys;
3779
3780         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3781                       RTNL_FLAG_DOIT_UNLOCKED);
3782         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3783                       RTNL_FLAG_DOIT_UNLOCKED);
3784         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3785                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3786         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3787         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3788         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3789                       tc_dump_chain, 0);
3790
3791         return 0;
3792
3793 err_register_pernet_subsys:
3794         destroy_workqueue(tc_filter_wq);
3795         return err;
3796 }
3797
3798 subsys_initcall(tc_filter_init);