net/mlx5: Use cpumask_local_spread() instead of custom code
authorErwan Velu <erwanaliasr1@gmail.com>
Mon, 12 Aug 2024 08:22:42 +0000 (10:22 +0200)
committerJakub Kicinski <kuba@kernel.org>
Fri, 16 Aug 2024 01:57:01 +0000 (18:57 -0700)
Commit 2acda57736de ("net/mlx5e: Improve remote NUMA preferences used for the IRQ affinity hints")
removed the usage of cpumask_local_spread().

The issue explained in that commit was fixed by
commit 406d394abfcd ("cpumask: improve on cpumask_local_spread() locality").

Since that commit, mlx5_cpumask_default_spread() has the same
behavior as cpumask_local_spread().
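
For illustration only (not part of the patch), a minimal sketch of the
replacement call, assuming the driver's vecidx index and the
dev->priv.numa_node field used elsewhere in this file:

	/*
	 * cpumask_local_spread() returns the i-th online CPU, preferring
	 * CPUs on 'node' and then increasingly distant NUMA nodes.
	 * Declared in <linux/cpumask.h>:
	 *   unsigned int cpumask_local_spread(unsigned int i, int node);
	 */
	cpu = cpumask_local_spread(vecidx, dev->priv.numa_node);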

This commit:
- removes the custom logic and uses cpumask_local_spread() instead
- passes mlx5_core_dev as an argument for more flexibility

mlx5_cpumask_default_spread() is kept, as it could be useful for
future device-specific quirks.

Signed-off-by: Erwan Velu <e.velu@criteo.com>
Acked-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20240812082244.22810-1-e.velu@criteo.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/eq.c

index cb7e7e4..f15ecae 100644 (file)
@@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
        mlx5_irq_release_vector(irq);
 }
 
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
 {
-       const struct cpumask *prev = cpu_none_mask;
-       const struct cpumask *mask;
-       int found_cpu = 0;
-       int i = 0;
-       int cpu;
-
-       rcu_read_lock();
-       for_each_numa_hop_mask(mask, numa_node) {
-               for_each_cpu_andnot(cpu, mask, prev) {
-                       if (i++ == index) {
-                               found_cpu = cpu;
-                               goto spread_done;
-                       }
-               }
-               prev = mask;
-       }
-
-spread_done:
-       rcu_read_unlock();
-       return found_cpu;
+       return cpumask_local_spread(index, dev->priv.numa_node);
 }
 
 static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
        int cpu;
 
        rmap = mlx5_eq_table_get_pci_rmap(dev);
-       cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+       cpu = mlx5_cpumask_default_spread(dev, vecidx);
        irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
        if (IS_ERR(irq))
                return PTR_ERR(irq);
@@ -1145,7 +1126,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
        if (mask)
                cpu = cpumask_first(mask);
        else
-               cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+               cpu = mlx5_cpumask_default_spread(dev, vector);
 
        return cpu;
 }