net: sched: Introduce ingress classification function
author Paul Blakey <paulb@mellanox.com>
Sun, 16 Feb 2020 10:01:21 +0000 (12:01 +0200)
committer Saeed Mahameed <saeedm@mellanox.com>
Thu, 20 Feb 2020 01:49:48 +0000 (17:49 -0800)
With a TC multi-chain configuration, offloaded tc chains can miss in
hardware after jumping to some chain. In such cases, software should
continue processing from the chain that missed in hardware, as the
hardware may already have manipulated the packet and updated some
counters.
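
For illustration only (this is not part of the patch), the hw miss
scenario being prepared for could look roughly like the sketch below:
a driver re-injecting a packet that missed in hardware records the
chain it missed on via the tc skb extension, so software classification
can later resume from that chain. The helper name and the way the
missed chain id is obtained are hypothetical.

#include <linux/skbuff.h>

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Hypothetical driver-side helper (illustrative sketch): remember the
 * tc chain the packet missed on in hardware, so software tc
 * classification can continue from it.
 */
static int record_missed_chain(struct sk_buff *skb, u32 missed_chain)
{
        struct tc_skb_ext *ext;

        ext = skb_ext_add(skb, TC_SKB_EXT);
        if (!ext)
                return -ENOMEM;

        ext->chain = missed_chain;
        return 0;
}
#endif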

Currently, a single tcf classification function serves both ingress
and egress. However, multi-chain miss processing (reading the tc skb
extension on a hw miss, setting it on a tc miss) should happen only on
ingress.

Refactor the code to use a dedicated ingress classification function,
and move setting of the tc skb extension from the common classification
path into it, in preparation for supporting the hw miss scenario.
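
As a further illustrative sketch (again outside this patch), once
tcf_classify_ingress() has set the extension on a tc miss, a caller on
the ingress path could read back the last executed chain roughly as
follows; the helper below is hypothetical:

#include <linux/skbuff.h>

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Hypothetical consumer-side helper: after tcf_classify_ingress()
 * returned TC_ACT_UNSPEC for a packet that had already executed a
 * non-zero chain, the last executed chain can be read back from the
 * extension attached on the miss.
 */
static u32 last_missed_chain(const struct sk_buff *skb)
{
        const struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

        return ext ? ext->chain : 0;
}
#endif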

Co-developed-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
include/net/pkt_cls.h
net/core/dev.c
net/sched/cls_api.c

diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 53946b5..109cbe3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -72,6 +72,8 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                 struct tcf_result *res, bool compat_mode);
+int tcf_classify_ingress(struct sk_buff *skb, const struct tcf_proto *tp,
+                        struct tcf_result *res, bool compat_mode);
 
 #else
 static inline bool tcf_block_shared(struct tcf_block *block)
@@ -133,6 +135,14 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 {
        return TC_ACT_UNSPEC;
 }
+
+static inline int tcf_classify_ingress(struct sk_buff *skb,
+                                      const struct tcf_proto *tp,
+                                      struct tcf_result *res, bool compat_mode)
+{
+       return TC_ACT_UNSPEC;
+}
+
 #endif
 
 static inline unsigned long
diff --git a/net/core/dev.c b/net/core/dev.c
index a6316b3..107af00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4860,7 +4860,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        skb->tc_at_ingress = 1;
        mini_qdisc_bstats_cpu_update(miniq, skb);
 
-       switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
+       switch (tcf_classify_ingress(skb, miniq->filter_list, &cl_res,
+                                    false)) {
        case TC_ACT_OK:
        case TC_ACT_RECLASSIFY:
                skb->tc_index = TC_H_MIN(cl_res.classid);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 13c33ea..d52b43c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1559,8 +1559,11 @@ static int tcf_block_setup(struct tcf_block *block,
  * to this qdisc, (optionally) tests for protocol and asks
  * specific classifiers.
  */
-int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-                struct tcf_result *res, bool compat_mode)
+static inline int __tcf_classify(struct sk_buff *skb,
+                                const struct tcf_proto *tp,
+                                struct tcf_result *res,
+                                bool compat_mode,
+                                u32 *last_executed_chain)
 {
 #ifdef CONFIG_NET_CLS_ACT
        const int max_reclassify_loop = 4;
@@ -1582,21 +1585,11 @@ reclassify:
 #ifdef CONFIG_NET_CLS_ACT
                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
                        first_tp = orig_tp;
+                       *last_executed_chain = first_tp->chain->index;
                        goto reset;
                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
                        first_tp = res->goto_tp;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-                       {
-                               struct tc_skb_ext *ext;
-
-                               ext = skb_ext_add(skb, TC_SKB_EXT);
-                               if (WARN_ON_ONCE(!ext))
-                                       return TC_ACT_SHOT;
-
-                               ext->chain = err & TC_ACT_EXT_VAL_MASK;
-                       }
-#endif
+                       *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
                        goto reset;
                }
 #endif
@@ -1619,8 +1612,45 @@ reset:
        goto reclassify;
 #endif
 }
+
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+                struct tcf_result *res, bool compat_mode)
+{
+       u32 last_executed_chain = 0;
+
+       return __tcf_classify(skb, tp, res, compat_mode,
+                             &last_executed_chain);
+}
 EXPORT_SYMBOL(tcf_classify);
 
+int tcf_classify_ingress(struct sk_buff *skb, const struct tcf_proto *tp,
+                        struct tcf_result *res, bool compat_mode)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+       u32 last_executed_chain = 0;
+
+       return __tcf_classify(skb, tp, res, compat_mode,
+                             &last_executed_chain);
+#else
+       u32 last_executed_chain = tp ? tp->chain->index : 0;
+       struct tc_skb_ext *ext;
+       int ret;
+
+       ret = __tcf_classify(skb, tp, res, compat_mode, &last_executed_chain);
+
+       /* If we missed on some chain */
+       if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+               ext = skb_ext_add(skb, TC_SKB_EXT);
+               if (WARN_ON_ONCE(!ext))
+                       return TC_ACT_SHOT;
+               ext->chain = last_executed_chain;
+       }
+
+       return ret;
+#endif
+}
+EXPORT_SYMBOL(tcf_classify_ingress);
+
 struct tcf_chain_info {
        struct tcf_proto __rcu **pprev;
        struct tcf_proto __rcu *next;