net/mlx5: Introduce mlx5_cpumask_default_spread
author Maher Sanalla <msanalla@nvidia.com>
Mon, 12 Jun 2023 08:50:10 +0000 (11:50 +0300)
committer Saeed Mahameed <saeedm@nvidia.com>
Mon, 7 Aug 2023 17:53:51 +0000 (10:53 -0700)
For better code readability in the completion IRQ request code, move
the per-completion-vector CPU lookup logic into a separate function.

The new helper, mlx5_cpumask_default_spread(), given a vector index 'n',
returns the n-th CPU. The helper will also be used in the next patch.

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/eq.c

index 41fa15757101ccb07318ef3a4f2c6c465486d5c4..ad654d460d0c424b8310d97361ddfff6fe9c6402 100644
@@ -826,20 +826,18 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
        mlx5_irq_release_vector(irq);
 }
 
-static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
+static int mlx5_cpumask_default_spread(int numa_node, int index)
 {
-       struct mlx5_eq_table *table = dev->priv.eq_table;
        const struct cpumask *prev = cpu_none_mask;
        const struct cpumask *mask;
-       struct mlx5_irq *irq;
        int found_cpu = 0;
        int i = 0;
        int cpu;
 
        rcu_read_lock();
-       for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+       for_each_numa_hop_mask(mask, numa_node) {
                for_each_cpu_andnot(cpu, mask, prev) {
-                       if (i++ == vecidx) {
+                       if (i++ == index) {
                                found_cpu = cpu;
                                goto spread_done;
                        }
@@ -849,7 +847,17 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
 
 spread_done:
        rcu_read_unlock();
-       irq = mlx5_irq_request_vector(dev, found_cpu, vecidx, &table->rmap);
+       return found_cpu;
+}
+
+static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
+{
+       struct mlx5_eq_table *table = dev->priv.eq_table;
+       struct mlx5_irq *irq;
+       int cpu;
+
+       cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+       irq = mlx5_irq_request_vector(dev, cpu, vecidx, &table->rmap);
        if (IS_ERR(irq))
                return PTR_ERR(irq);