Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlx5 / core / en / tc_ct.c
index a8be40c..e521254 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/workqueue.h>
 #include <linux/xarray.h>
 
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
 #include "en/tc_ct.h"
 #include "en/mod_hdr.h"
 #include "en/mapping.h"
@@ -39,8 +39,9 @@
        netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
 
 struct mlx5_tc_ct_priv {
-       struct mlx5_eswitch *esw;
+       struct mlx5_core_dev *dev;
        const struct net_device *netdev;
+       struct mod_hdr_tbl *mod_hdr_tbl;
        struct idr fte_ids;
        struct xarray tuple_ids;
        struct rhashtable zone_ht;
@@ -50,13 +51,16 @@ struct mlx5_tc_ct_priv {
        struct mlx5_flow_table *ct_nat;
        struct mlx5_flow_table *post_ct;
        struct mutex control_lock; /* guards parallel adds/dels */
+       struct mutex shared_counter_lock;
        struct mapping_ctx *zone_mapping;
        struct mapping_ctx *labels_mapping;
+       enum mlx5_flow_namespace_type ns_type;
+       struct mlx5_fs_chains *chains;
 };
 
 struct mlx5_ct_flow {
-       struct mlx5_esw_flow_attr pre_ct_attr;
-       struct mlx5_esw_flow_attr post_ct_attr;
+       struct mlx5_flow_attr *pre_ct_attr;
+       struct mlx5_flow_attr *post_ct_attr;
        struct mlx5_flow_handle *pre_ct_rule;
        struct mlx5_flow_handle *post_ct_rule;
        struct mlx5_ct_ft *ft;
@@ -67,12 +71,12 @@ struct mlx5_ct_flow {
 struct mlx5_ct_zone_rule {
        struct mlx5_flow_handle *rule;
        struct mlx5e_mod_hdr_handle *mh;
-       struct mlx5_esw_flow_attr attr;
+       struct mlx5_flow_attr *attr;
        bool nat;
 };
 
 struct mlx5_tc_ct_pre {
-       struct mlx5_flow_table *fdb;
+       struct mlx5_flow_table *ft;
        struct mlx5_flow_group *flow_grp;
        struct mlx5_flow_group *miss_grp;
        struct mlx5_flow_handle *flow_rule;
@@ -114,11 +118,16 @@ struct mlx5_ct_tuple {
        u16 zone;
 };
 
+struct mlx5_ct_shared_counter {
+       struct mlx5_fc *counter;
+       refcount_t refcount;
+};
+
 struct mlx5_ct_entry {
        struct rhash_head node;
        struct rhash_head tuple_node;
        struct rhash_head tuple_nat_node;
-       struct mlx5_fc *counter;
+       struct mlx5_ct_shared_counter *shared_counter;
        unsigned long cookie;
        unsigned long restore_cookie;
        struct mlx5_ct_tuple tuple;
@@ -157,18 +166,6 @@ static const struct rhashtable_params tuples_nat_ht_params = {
        .min_size = 16 * 1024,
 };
 
-static struct mlx5_tc_ct_priv *
-mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
-{
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mlx5_rep_uplink_priv *uplink_priv;
-       struct mlx5e_rep_priv *uplink_rpriv;
-
-       uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
-       uplink_priv = &uplink_rpriv->uplink_priv;
-       return uplink_priv->ct_priv;
-}
-
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -396,21 +393,31 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
        return 0;
 }
 
+static void
+mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+{
+       if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+               return;
+
+       mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
+       kfree(entry->shared_counter);
+}
+
 static void
 mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
                          struct mlx5_ct_entry *entry,
                          bool nat)
 {
        struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
-       struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
-       struct mlx5_eswitch *esw = ct_priv->esw;
+       struct mlx5_flow_attr *attr = zone_rule->attr;
 
        ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
-       mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
-       mlx5e_mod_hdr_detach(ct_priv->esw->dev,
-                            &esw->offloads.mod_hdr, zone_rule->mh);
+       mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+       mlx5e_mod_hdr_detach(ct_priv->dev,
+                            ct_priv->mod_hdr_tbl, zone_rule->mh);
        mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       kfree(attr);
 }
 
 static void
@@ -419,8 +426,6 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
        mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
        mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
-
-       mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
 }
 
 static struct flow_action_entry *
@@ -446,29 +451,40 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
                               u32 labels_id,
                               u8 zone_restore_id)
 {
-       struct mlx5_eswitch *esw = ct_priv->esw;
+       enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
+       struct mlx5_core_dev *dev = ct_priv->dev;
        int err;
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+       err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
                                        CTSTATE_TO_REG, ct_state);
        if (err)
                return err;
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+       err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
                                        MARK_TO_REG, mark);
        if (err)
                return err;
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+       err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
                                        LABELS_TO_REG, labels_id);
        if (err)
                return err;
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+       err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
                                        ZONE_RESTORE_TO_REG, zone_restore_id);
        if (err)
                return err;
 
+       /* Make another copy of zone id in reg_b for
+        * NIC rx flows since we don't copy reg_c1 to
+        * reg_b upon miss.
+        */
+       if (ns != MLX5_FLOW_NAMESPACE_FDB) {
+               err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
+                                               NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
+               if (err)
+                       return err;
+       }
        return 0;
 }
 
@@ -549,7 +565,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
                            struct mlx5e_tc_mod_hdr_acts *mod_acts)
 {
        struct flow_action *flow_action = &flow_rule->action;
-       struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+       struct mlx5_core_dev *mdev = ct_priv->dev;
        struct flow_action_entry *act;
        size_t action_size;
        char *modact;
@@ -560,8 +576,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_MANGLE: {
-                       err = alloc_mod_hdr_actions(mdev,
-                                                   MLX5_FLOW_NAMESPACE_FDB,
+                       err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
                                                    mod_acts);
                        if (err)
                                return err;
@@ -590,7 +605,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
 
 static int
 mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
-                               struct mlx5_esw_flow_attr *attr,
+                               struct mlx5_flow_attr *attr,
                                struct flow_rule *flow_rule,
                                struct mlx5e_mod_hdr_handle **mh,
                                u8 zone_restore_id, bool nat)
@@ -626,9 +641,9 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (err)
                goto err_mapping;
 
-       *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
-                                  &ct_priv->esw->offloads.mod_hdr,
-                                  MLX5_FLOW_NAMESPACE_FDB,
+       *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+                                  ct_priv->mod_hdr_tbl,
+                                  ct_priv->ns_type,
                                   &mod_acts);
        if (IS_ERR(*mh)) {
                err = PTR_ERR(*mh);
@@ -652,9 +667,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
                          bool nat, u8 zone_restore_id)
 {
        struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
-       struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
-       struct mlx5_eswitch *esw = ct_priv->esw;
+       struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
        struct mlx5_flow_spec *spec = NULL;
+       struct mlx5_flow_attr *attr;
        int err;
 
        zone_rule->nat = nat;
@@ -663,6 +678,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        if (!spec)
                return -ENOMEM;
 
+       attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+       if (!attr) {
+               err = -ENOMEM;
+               goto err_attr;
+       }
+
        err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
                                              &zone_rule->mh,
                                              zone_restore_id, nat);
@@ -676,9 +697,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
                       MLX5_FLOW_CONTEXT_ACTION_COUNT;
        attr->dest_chain = 0;
        attr->dest_ft = ct_priv->post_ct;
-       attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+       attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
        attr->outer_match_level = MLX5_MATCH_L4;
-       attr->counter = entry->counter;
+       attr->counter = entry->shared_counter->counter;
        attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
        mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -686,39 +707,100 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
                                    entry->tuple.zone & MLX5_CT_ZONE_MASK,
                                    MLX5_CT_ZONE_MASK);
 
-       zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+       zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
        if (IS_ERR(zone_rule->rule)) {
                err = PTR_ERR(zone_rule->rule);
                ct_dbg("Failed to add ct entry rule, nat: %d", nat);
                goto err_rule;
        }
 
+       zone_rule->attr = attr;
+
        kfree(spec);
        ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
 
        return 0;
 
 err_rule:
-       mlx5e_mod_hdr_detach(ct_priv->esw->dev,
-                            &esw->offloads.mod_hdr, zone_rule->mh);
+       mlx5e_mod_hdr_detach(ct_priv->dev,
+                            ct_priv->mod_hdr_tbl, zone_rule->mh);
        mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
+       kfree(attr);
+err_attr:
        kfree(spec);
        return err;
 }
 
+static struct mlx5_ct_shared_counter *
+mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+                             struct mlx5_ct_entry *entry)
+{
+       struct mlx5_ct_tuple rev_tuple = entry->tuple;
+       struct mlx5_ct_shared_counter *shared_counter;
+       struct mlx5_core_dev *dev = ct_priv->dev;
+       struct mlx5_ct_entry *rev_entry;
+       __be16 tmp_port;
+       int ret;
+
+       /* get the reversed tuple */
+       tmp_port = rev_tuple.port.src;
+       rev_tuple.port.src = rev_tuple.port.dst;
+       rev_tuple.port.dst = tmp_port;
+
+       if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+               __be32 tmp_addr = rev_tuple.ip.src_v4;
+
+               rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
+               rev_tuple.ip.dst_v4 = tmp_addr;
+       } else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+               struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
+
+               rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
+               rev_tuple.ip.dst_v6 = tmp_addr;
+       } else {
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       /* Use the same counter as the reverse direction */
+       mutex_lock(&ct_priv->shared_counter_lock);
+       rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+                                          tuples_ht_params);
+       if (rev_entry) {
+               if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+                       mutex_unlock(&ct_priv->shared_counter_lock);
+                       return rev_entry->shared_counter;
+               }
+       }
+       mutex_unlock(&ct_priv->shared_counter_lock);
+
+       shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
+       if (!shared_counter)
+               return ERR_PTR(-ENOMEM);
+
+       shared_counter->counter = mlx5_fc_create(dev, true);
+       if (IS_ERR(shared_counter->counter)) {
+               ct_dbg("Failed to create counter for ct entry");
+               ret = PTR_ERR(shared_counter->counter);
+               kfree(shared_counter);
+               return ERR_PTR(ret);
+       }
+
+       refcount_set(&shared_counter->refcount, 1);
+       return shared_counter;
+}
+
 static int
 mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
                           struct flow_rule *flow_rule,
                           struct mlx5_ct_entry *entry,
                           u8 zone_restore_id)
 {
-       struct mlx5_eswitch *esw = ct_priv->esw;
        int err;
 
-       entry->counter = mlx5_fc_create(esw->dev, true);
-       if (IS_ERR(entry->counter)) {
-               err = PTR_ERR(entry->counter);
+       entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+       if (IS_ERR(entry->shared_counter)) {
+               err = PTR_ERR(entry->shared_counter);
                ct_dbg("Failed to create counter for ct entry");
                return err;
        }
@@ -738,7 +820,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 err_nat:
        mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
-       mlx5_fc_destroy(esw->dev, entry->counter);
+       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
        return err;
 }
 
@@ -828,12 +910,16 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
                        struct mlx5_ct_entry *entry)
 {
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+       mutex_lock(&ct_priv->shared_counter_lock);
        if (entry->tuple_node.next)
                rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
                                       &entry->tuple_nat_node,
                                       tuples_nat_ht_params);
        rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
                               tuples_ht_params);
+       mutex_unlock(&ct_priv->shared_counter_lock);
+       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+
 }
 
 static int
@@ -870,7 +956,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
        if (!entry)
                return -ENOENT;
 
-       mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+       mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
        flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
 
@@ -943,9 +1029,7 @@ out:
        return false;
 }
 
-int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
-                           struct mlx5_flow_spec *spec)
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 {
        u32 ctstate = 0, ctstate_mask = 0;
 
@@ -961,24 +1045,21 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
        return 0;
 }
 
-void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr)
+void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
-       if (!ct_priv || !ct_attr->ct_labels_id)
+       if (!priv || !ct_attr->ct_labels_id)
                return;
 
-       mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id);
+       mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
 }
 
 int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
                     struct mlx5_flow_spec *spec,
                     struct flow_cls_offload *f,
                     struct mlx5_ct_attr *ct_attr,
                     struct netlink_ext_ack *extack)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector_key_ct *mask, *key;
        bool trk, est, untrk, unest, new;
@@ -991,7 +1072,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                return 0;
 
-       if (!ct_priv) {
+       if (!priv) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "offload of ct matching isn't available");
                return -EOPNOTSUPP;
@@ -1047,7 +1128,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
                ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
                ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
                ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-               if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+               if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
                        return -EOPNOTSUPP;
                mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
                                            MLX5_CT_LABELS_MASK);
@@ -1057,14 +1138,12 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
 }
 
 int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
-                       struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+                       struct mlx5_flow_attr *attr,
                        const struct flow_action_entry *act,
                        struct netlink_ext_ack *extack)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
-       if (!ct_priv) {
+       if (!priv) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "offload of ct action isn't available");
                return -EOPNOTSUPP;
@@ -1083,8 +1162,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
 {
        struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
        struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
-       struct mlx5_core_dev *dev = ct_priv->esw->dev;
-       struct mlx5_flow_table *fdb = pre_ct->fdb;
+       struct mlx5_core_dev *dev = ct_priv->dev;
+       struct mlx5_flow_table *ft = pre_ct->ft;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_modify_hdr *mod_hdr;
@@ -1099,14 +1178,14 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
                return -ENOMEM;
 
        zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
-       err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+       err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
+                                       ZONE_TO_REG, zone);
        if (err) {
                ct_dbg("Failed to set zone register mapping");
                goto err_mapping;
        }
 
-       mod_hdr = mlx5_modify_header_alloc(dev,
-                                          MLX5_FLOW_NAMESPACE_FDB,
+       mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
                                           pre_mod_acts.num_actions,
                                           pre_mod_acts.actions);
 
@@ -1132,7 +1211,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
        mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
 
        dest.ft = ct_priv->post_ct;
-       rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                ct_dbg("Failed to add pre ct flow rule zone %d", zone);
@@ -1143,7 +1222,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
        /* add miss rule */
        memset(spec, 0, sizeof(*spec));
        dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
-       rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1170,7 +1249,7 @@ tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
                       struct mlx5_tc_ct_pre *pre_ct)
 {
        struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
-       struct mlx5_core_dev *dev = ct_priv->esw->dev;
+       struct mlx5_core_dev *dev = ct_priv->dev;
 
        mlx5_del_flow_rules(pre_ct->flow_rule);
        mlx5_del_flow_rules(pre_ct->miss_rule);
@@ -1184,7 +1263,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
-       struct mlx5_core_dev *dev = ct_priv->esw->dev;
+       struct mlx5_core_dev *dev = ct_priv->dev;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *ns;
        struct mlx5_flow_table *ft;
@@ -1194,10 +1273,10 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
        void *misc;
        int err;
 
-       ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+       ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
        if (!ns) {
                err = -EOPNOTSUPP;
-               ct_dbg("Failed to get FDB flow namespace");
+               ct_dbg("Failed to get flow namespace");
                return err;
        }
 
@@ -1206,7 +1285,8 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
                return -ENOMEM;
 
        ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
-       ft_attr.prio = FDB_TC_OFFLOAD;
+       ft_attr.prio =  ct_priv->ns_type ==  MLX5_FLOW_NAMESPACE_FDB ?
+                       FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
        ft_attr.max_fte = 2;
        ft_attr.level = 1;
        ft = mlx5_create_flow_table(ns, &ft_attr);
@@ -1215,7 +1295,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
                ct_dbg("Failed to create pre ct table");
                goto out_free;
        }
-       pre_ct->fdb = ft;
+       pre_ct->ft = ft;
 
        /* create flow group */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1279,7 +1359,7 @@ mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
        tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
        mlx5_destroy_flow_group(pre_ct->miss_grp);
        mlx5_destroy_flow_group(pre_ct->flow_grp);
-       mlx5_destroy_flow_table(pre_ct->fdb);
+       mlx5_destroy_flow_table(pre_ct->ft);
 }
 
 static int
@@ -1398,7 +1478,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 /* We translate the tc filter with CT action to the following HW model:
  *
  * +---------------------+
- * + fdb prio (tc chain) +
+ * + ft prio (tc chain)  +
  * + original match      +
  * +---------------------+
  *      | set chain miss mapping
@@ -1428,17 +1508,17 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
  * +--------------+
  */
 static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
                          struct mlx5e_tc_flow *flow,
                          struct mlx5_flow_spec *orig_spec,
-                         struct mlx5_esw_flow_attr *attr)
+                         struct mlx5_flow_attr *attr)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+       struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
        struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+       u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
        struct mlx5_flow_spec *post_ct_spec = NULL;
-       struct mlx5_eswitch *esw = ct_priv->esw;
-       struct mlx5_esw_flow_attr *pre_ct_attr;
+       struct mlx5_flow_attr *pre_ct_attr;
        struct mlx5_modify_hdr *mod_hdr;
        struct mlx5_flow_handle *rule;
        struct mlx5_ct_flow *ct_flow;
@@ -1473,10 +1553,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
        }
        ct_flow->fte_id = fte_id;
 
-       /* Base esw attributes of both rules on original rule attribute */
-       pre_ct_attr = &ct_flow->pre_ct_attr;
-       memcpy(pre_ct_attr, attr, sizeof(*attr));
-       memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+       /* Base flow attributes of both rules on original rule attribute */
+       ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+       if (!ct_flow->pre_ct_attr) {
+               err = -ENOMEM;
+               goto err_alloc_pre;
+       }
+
+       ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+       if (!ct_flow->post_ct_attr) {
+               err = -ENOMEM;
+               goto err_alloc_post;
+       }
+
+       pre_ct_attr = ct_flow->pre_ct_attr;
+       memcpy(pre_ct_attr, attr, attr_sz);
+       memcpy(ct_flow->post_ct_attr, attr, attr_sz);
 
        /* Modify the original rule's action to fwd and modify, leave decap */
        pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1487,22 +1579,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
         * don't go though all prios of this chain as normal tc rules
         * miss.
         */
-       err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
-                                               &chain_mapping);
+       err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
+                                           &chain_mapping);
        if (err) {
                ct_dbg("Failed to get chain register mapping for chain");
                goto err_get_chain;
        }
        ct_flow->chain_mapping = chain_mapping;
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+       err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
                                        CHAIN_TO_REG, chain_mapping);
        if (err) {
                ct_dbg("Failed to set chain register mapping");
                goto err_mapping;
        }
 
-       err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+       err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
                                        FTEID_TO_REG, fte_id);
        if (err) {
                ct_dbg("Failed to set fte_id register mapping");
@@ -1516,7 +1608,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
            attr->chain == 0) {
                u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
 
-               err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+               err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+                                               ct_priv->ns_type,
                                                TUNNEL_TO_REG,
                                                tun_id);
                if (err) {
@@ -1525,8 +1618,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
                }
        }
 
-       mod_hdr = mlx5_modify_header_alloc(esw->dev,
-                                          MLX5_FLOW_NAMESPACE_FDB,
+       mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
                                           pre_mod_acts.num_actions,
                                           pre_mod_acts.actions);
        if (IS_ERR(mod_hdr)) {
@@ -1542,16 +1634,16 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
        mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
                                    fte_id, MLX5_FTE_ID_MASK);
 
-       /* Put post_ct rule on post_ct fdb */
-       ct_flow->post_ct_attr.chain = 0;
-       ct_flow->post_ct_attr.prio = 0;
-       ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+       /* Put post_ct rule on post_ct flow table */
+       ct_flow->post_ct_attr->chain = 0;
+       ct_flow->post_ct_attr->prio = 0;
+       ct_flow->post_ct_attr->ft = ct_priv->post_ct;
 
-       ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
-       ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
-       ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
-       rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
-                                              &ct_flow->post_ct_attr);
+       ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
+       ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
+       ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+       rule = mlx5_tc_rule_insert(priv, post_ct_spec,
+                                  ct_flow->post_ct_attr);
        ct_flow->post_ct_rule = rule;
        if (IS_ERR(ct_flow->post_ct_rule)) {
                err = PTR_ERR(ct_flow->post_ct_rule);
@@ -1561,10 +1653,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 
        /* Change original rule point to ct table */
        pre_ct_attr->dest_chain = 0;
-       pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
-       ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
-                                                              orig_spec,
-                                                              pre_ct_attr);
+       pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+       ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
+                                                  pre_ct_attr);
        if (IS_ERR(ct_flow->pre_ct_rule)) {
                err = PTR_ERR(ct_flow->pre_ct_rule);
                ct_dbg("Failed to add pre ct rule");
@@ -1578,14 +1669,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
        return rule;
 
 err_insert_orig:
-       mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
-                                       &ct_flow->post_ct_attr);
+       mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+                           ct_flow->post_ct_attr);
 err_insert_post_ct:
        mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
 err_mapping:
        dealloc_mod_hdr_actions(&pre_mod_acts);
-       mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+       mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
 err_get_chain:
+       kfree(ct_flow->post_ct_attr);
+err_alloc_post:
+       kfree(ct_flow->pre_ct_attr);
+err_alloc_pre:
        idr_remove(&ct_priv->fte_ids, fte_id);
 err_idr:
        mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1597,14 +1692,14 @@ err_ft:
 }
 
 static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
                                struct mlx5_flow_spec *orig_spec,
-                               struct mlx5_esw_flow_attr *attr,
+                               struct mlx5_flow_attr *attr,
                                struct mlx5e_tc_mod_hdr_acts *mod_acts)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-       struct mlx5_eswitch *esw = ct_priv->esw;
-       struct mlx5_esw_flow_attr *pre_ct_attr;
+       struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
+       u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
+       struct mlx5_flow_attr *pre_ct_attr;
        struct mlx5_modify_hdr *mod_hdr;
        struct mlx5_flow_handle *rule;
        struct mlx5_ct_flow *ct_flow;
@@ -1615,8 +1710,13 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
                return ERR_PTR(-ENOMEM);
 
        /* Base esw attributes on original rule attribute */
-       pre_ct_attr = &ct_flow->pre_ct_attr;
-       memcpy(pre_ct_attr, attr, sizeof(*attr));
+       pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+       if (!pre_ct_attr) {
+               err = -ENOMEM;
+               goto err_attr;
+       }
+
+       memcpy(pre_ct_attr, attr, attr_sz);
 
        err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
        if (err) {
@@ -1624,8 +1724,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
                goto err_set_registers;
        }
 
-       mod_hdr = mlx5_modify_header_alloc(esw->dev,
-                                          MLX5_FLOW_NAMESPACE_FDB,
+       mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
                                           mod_acts->num_actions,
                                           mod_acts->actions);
        if (IS_ERR(mod_hdr)) {
@@ -1638,7 +1737,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
        pre_ct_attr->modify_hdr = mod_hdr;
        pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 
-       rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+       rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
                ct_dbg("Failed to add ct clear rule");
@@ -1646,6 +1745,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
        }
 
        attr->ct_attr.ct_flow = ct_flow;
+       ct_flow->pre_ct_attr = pre_ct_attr;
        ct_flow->pre_ct_rule = rule;
        return rule;
 
@@ -1654,61 +1754,67 @@ err_insert:
 err_set_registers:
        netdev_warn(priv->netdev,
                    "Failed to offload ct clear flow, err %d\n", err);
+       kfree(pre_ct_attr);
+err_attr:
+       kfree(ct_flow);
+
        return ERR_PTR(err);
 }
 
 struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
                        struct mlx5e_tc_flow *flow,
                        struct mlx5_flow_spec *spec,
-                       struct mlx5_esw_flow_attr *attr,
+                       struct mlx5_flow_attr *attr,
                        struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
 {
        bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        struct mlx5_flow_handle *rule;
 
-       if (!ct_priv)
+       if (!priv)
                return ERR_PTR(-EOPNOTSUPP);
 
-       mutex_lock(&ct_priv->control_lock);
+       mutex_lock(&priv->control_lock);
 
        if (clear_action)
                rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
        else
                rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
-       mutex_unlock(&ct_priv->control_lock);
+       mutex_unlock(&priv->control_lock);
 
        return rule;
 }
 
 static void
 __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+                        struct mlx5e_tc_flow *flow,
                         struct mlx5_ct_flow *ct_flow)
 {
-       struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
-       struct mlx5_eswitch *esw = ct_priv->esw;
+       struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
+       struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
 
-       mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
-                                       pre_ct_attr);
-       mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+       mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
+                           pre_ct_attr);
+       mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
 
        if (ct_flow->post_ct_rule) {
-               mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
-                                               &ct_flow->post_ct_attr);
-               mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+               mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+                                   ct_flow->post_ct_attr);
+               mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
                idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
                mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
        }
 
+       kfree(ct_flow->pre_ct_attr);
+       kfree(ct_flow->post_ct_attr);
        kfree(ct_flow);
 }
 
 void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
-                      struct mlx5_esw_flow_attr *attr)
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
+                      struct mlx5e_tc_flow *flow,
+                      struct mlx5_flow_attr *attr)
 {
-       struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
 
        /* We are called on error to clean up stuff from parsing
@@ -1717,22 +1823,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
        if (!ct_flow)
                return;
 
-       mutex_lock(&ct_priv->control_lock);
-       __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
-       mutex_unlock(&ct_priv->control_lock);
+       mutex_lock(&priv->control_lock);
+       __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+       mutex_unlock(&priv->control_lock);
 }
 
 static int
-mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
-                             const char **err_msg)
+mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
+                                 const char **err_msg)
 {
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-       /* cannot restore chain ID on HW miss */
-
-       *err_msg = "tc skb extension missing";
-       return -EOPNOTSUPP;
-#endif
-
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
                *err_msg = "firmware level support is missing";
                return -EOPNOTSUPP;
@@ -1766,44 +1865,61 @@ mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
        return 0;
 }
 
-static void
-mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+static int
+mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv,
+                                 const char **err_msg)
+{
+       if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+               *err_msg = "firmware level support is missing";
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
+                             enum mlx5_flow_namespace_type ns_type,
+                             const char **err_msg)
 {
-       if (msg)
-               netdev_warn(rpriv->netdev,
-                           "tc ct offload not supported, %s, err: %d\n",
-                           msg, err);
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+       /* cannot restore chain ID on HW miss */
+
+       *err_msg = "tc skb extension missing";
+       return -EOPNOTSUPP;
+#endif
+       if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+               return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
        else
-               netdev_warn(rpriv->netdev,
-                           "tc ct offload not supported, err: %d\n",
-                           err);
+               return mlx5_tc_ct_init_check_nic_support(priv, err_msg);
 }
 
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+#define INIT_ERR_PREFIX "tc ct offload init failed"
+
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+               struct mod_hdr_tbl *mod_hdr,
+               enum mlx5_flow_namespace_type ns_type)
 {
        struct mlx5_tc_ct_priv *ct_priv;
-       struct mlx5e_rep_priv *rpriv;
-       struct mlx5_eswitch *esw;
-       struct mlx5e_priv *priv;
+       struct mlx5_core_dev *dev;
        const char *msg;
        int err;
 
-       rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
-       priv = netdev_priv(rpriv->netdev);
-       esw = priv->mdev->priv.eswitch;
-
-       err = mlx5_tc_ct_init_check_support(esw, &msg);
+       dev = priv->mdev;
+       err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg);
        if (err) {
-               mlx5_tc_ct_init_err(rpriv, msg, err);
+               mlx5_core_warn(dev,
+                              "tc ct offload not supported, %s\n",
+                              msg);
                goto err_support;
        }
 
        ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
-       if (!ct_priv) {
-               mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+       if (!ct_priv)
                goto err_alloc;
-       }
 
        ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
        if (IS_ERR(ct_priv->zone_mapping)) {
@@ -1817,46 +1933,51 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
                goto err_mapping_labels;
        }
 
-       ct_priv->esw = esw;
-       ct_priv->netdev = rpriv->netdev;
-       ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+       ct_priv->ns_type = ns_type;
+       ct_priv->chains = chains;
+       ct_priv->netdev = priv->netdev;
+       ct_priv->dev = priv->mdev;
+       ct_priv->mod_hdr_tbl = mod_hdr;
+       ct_priv->ct = mlx5_chains_create_global_table(chains);
        if (IS_ERR(ct_priv->ct)) {
                err = PTR_ERR(ct_priv->ct);
-               mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+               mlx5_core_warn(dev,
+                              "%s, failed to create ct table err: %d\n",
+                              INIT_ERR_PREFIX, err);
                goto err_ct_tbl;
        }
 
-       ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+       ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
        if (IS_ERR(ct_priv->ct_nat)) {
                err = PTR_ERR(ct_priv->ct_nat);
-               mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
-                                   err);
+               mlx5_core_warn(dev,
+                              "%s, failed to create ct nat table err: %d\n",
+                              INIT_ERR_PREFIX, err);
                goto err_ct_nat_tbl;
        }
 
-       ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+       ct_priv->post_ct = mlx5_chains_create_global_table(chains);
        if (IS_ERR(ct_priv->post_ct)) {
                err = PTR_ERR(ct_priv->post_ct);
-               mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
-                                   err);
+               mlx5_core_warn(dev,
+                              "%s, failed to create post ct table err: %d\n",
+                              INIT_ERR_PREFIX, err);
                goto err_post_ct_tbl;
        }
 
        idr_init(&ct_priv->fte_ids);
        mutex_init(&ct_priv->control_lock);
+       mutex_init(&ct_priv->shared_counter_lock);
        rhashtable_init(&ct_priv->zone_ht, &zone_params);
        rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
        rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
 
-       /* Done, set ct_priv to know it initializted */
-       uplink_priv->ct_priv = ct_priv;
-
-       return 0;
+       return ct_priv;
 
 err_post_ct_tbl:
-       mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+       mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
 err_ct_nat_tbl:
-       mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+       mlx5_chains_destroy_global_table(chains, ct_priv->ct);
 err_ct_tbl:
        mapping_destroy(ct_priv->labels_mapping);
 err_mapping_labels:
@@ -1866,20 +1987,22 @@ err_mapping_zone:
 err_alloc:
 err_support:
 
-       return 0;
+       return NULL;
 }
 
 void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 {
-       struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+       struct mlx5_fs_chains *chains;
 
        if (!ct_priv)
                return;
 
-       mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
-       mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
-       mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+       chains = ct_priv->chains;
+
+       mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
+       mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
+       mlx5_chains_destroy_global_table(chains, ct_priv->ct);
        mapping_destroy(ct_priv->zone_mapping);
        mapping_destroy(ct_priv->labels_mapping);
 
@@ -1887,17 +2010,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
        rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
        rhashtable_destroy(&ct_priv->zone_ht);
        mutex_destroy(&ct_priv->control_lock);
+       mutex_destroy(&ct_priv->shared_counter_lock);
        idr_destroy(&ct_priv->fte_ids);
        kfree(ct_priv);
-
-       uplink_priv->ct_priv = NULL;
 }
 
 bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
                         struct sk_buff *skb, u8 zone_restore_id)
 {
-       struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
        struct mlx5_ct_tuple tuple = {};
        struct mlx5_ct_entry *entry;
        u16 zone;