eth: bnxt: use the RSS context XArray instead of the local list
author Jakub Kicinski <kuba@kernel.org>
Thu, 11 Jul 2024 22:07:10 +0000 (15:07 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Sat, 13 Jul 2024 05:16:22 +0000 (22:16 -0700)
Core already maintains all RSS contexts in an XArray, no need
to keep a second list in the driver.

Remove bnxt_get_max_rss_ctx_ring() completely since core performs
the same check already.

Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Link: https://patch.msgid.link/20240711220713.283778-9-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c

index e3cc347..f9554f5 100644 (file)
@@ -5970,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
                          struct hwrm_cfa_ntuple_filter_alloc_input *req,
                          struct bnxt_ntuple_filter *fltr)
 {
-       struct bnxt_rss_ctx *rss_ctx, *tmp;
        u16 rxq = fltr->base.rxq;
 
        if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
-               list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
-                       if (rss_ctx->index == fltr->base.fw_vnic_id) {
-                               struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+               struct ethtool_rxfh_context *ctx;
+               struct bnxt_rss_ctx *rss_ctx;
+               struct bnxt_vnic_info *vnic;
 
-                               req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
-                               break;
-                       }
+               ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+                             fltr->base.fw_vnic_id);
+               if (ctx) {
+                       rss_ctx = ethtool_rxfh_context_priv(ctx);
+                       vnic = &rss_ctx->vnic;
+
+                       req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
                }
                return;
        }
@@ -6282,21 +6285,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
        return max_ring;
 }
 
-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp)
-{
-       u16 i, tbl_size, max_ring = 0;
-       struct bnxt_rss_ctx *rss_ctx;
-
-       tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
-
-       list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) {
-               for (i = 0; i < tbl_size; i++)
-                       max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]);
-       }
-
-       return max_ring;
-}
-
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -10237,16 +10225,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
                                  vnic->rss_table,
                                  vnic->rss_table_dma_addr);
        kfree(rss_ctx->rss_indir_tbl);
-       list_del(&rss_ctx->list);
        bp->num_rss_ctx--;
 }
 
 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 {
        bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
-       struct bnxt_rss_ctx *rss_ctx, *tmp;
+       struct ethtool_rxfh_context *ctx;
+       unsigned long context;
 
-       list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+       xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+               struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
                struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
 
                if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
@@ -10262,16 +10251,14 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 
 void bnxt_clear_rss_ctxs(struct bnxt *bp)
 {
-       struct bnxt_rss_ctx *rss_ctx, *tmp;
+       struct ethtool_rxfh_context *ctx;
+       unsigned long context;
 
-       list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
-               bnxt_del_one_rss_ctx(bp, rss_ctx, false);
-}
+       xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+               struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
 
-static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
-{
-       INIT_LIST_HEAD(&bp->rss_ctx_list);
-       bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+               bnxt_del_one_rss_ctx(bp, rss_ctx, false);
+       }
 }
 
 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -15859,8 +15846,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_LIST_HEAD(&bp->usr_fltr_list);
 
        if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
-               bnxt_init_multi_rss_ctx(bp);
-
+               bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
 
        rc = register_netdev(dev);
        if (rc)
index c5fd7a4..be40e05 100644 (file)
@@ -1291,7 +1291,6 @@ struct bnxt_vnic_info {
 };
 
 struct bnxt_rss_ctx {
-       struct list_head list;
        struct bnxt_vnic_info vnic;
        u16     *rss_indir_tbl;
        u8      index;
@@ -2330,7 +2329,6 @@ struct bnxt {
        /* grp_info indexed by completion ring index */
        struct bnxt_ring_grp_info       *grp_info;
        struct bnxt_vnic_info   *vnic_info;
-       struct list_head        rss_ctx_list;
        u32                     num_rss_ctx;
        int                     nr_vnics;
        u16                     *rss_indir_tbl;
@@ -2812,7 +2810,6 @@ int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 void bnxt_fill_ipv6_mask(__be32 mask[4]);
 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
index de8e134..7476558 100644 (file)
@@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev,
                return rc;
        }
 
-       if (req_rx_rings < bp->rx_nr_rings &&
-           req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) {
-               netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n");
-               return -EINVAL;
-       }
-
        if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
            bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
            netif_is_rxfh_configured(dev)) {
@@ -1216,12 +1210,12 @@ fltr_err:
 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
                                                        u32 index)
 {
-       struct bnxt_rss_ctx *rss_ctx, *tmp;
+       struct ethtool_rxfh_context *ctx;
 
-       list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
-               if (rss_ctx->index == index)
-                       return rss_ctx;
-       return NULL;
+       ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+       if (!ctx)
+               return NULL;
+       return ethtool_rxfh_context_priv(ctx);
 }
 
 static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
@@ -1909,7 +1903,6 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
 
        rss_ctx = ethtool_rxfh_context_priv(ctx);
 
-       list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
        bp->num_rss_ctx++;
 
        vnic = &rss_ctx->vnic;