net/mlx5: Lag, filter non compatible devices
authorMark Bloch <mbloch@nvidia.com>
Sun, 27 Feb 2022 12:40:39 +0000 (12:40 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 14 Jun 2022 16:36:18 +0000 (18:36 +0200)
[ Upstream commit bc4c2f2e017949646b43fdcad005a03462d437c6 ]

When searching for a peer lag device we can filter based on that
device's capabilities.

A downstream patch will be less strict when filtering compatible devices:
it will remove the limitation where we require exactly MLX5_MAX_PORTS
ports and change it to a range.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h

index e8093c4e09d4e35a024a362ade4d30585af7c344..94411b34799ea88d2278273f5a444cc0ae9d2e8d 100644 (file)
@@ -544,12 +544,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
                     PCI_SLOT(dev->pdev->devfn));
 }
 
-static int next_phys_dev(struct device *dev, const void *data)
+static int _next_phys_dev(struct mlx5_core_dev *mdev,
+                         const struct mlx5_core_dev *curr)
 {
-       struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
-       struct mlx5_core_dev *mdev = madev->mdev;
-       const struct mlx5_core_dev *curr = data;
-
        if (!mlx5_core_is_pf(mdev))
                return 0;
 
@@ -562,8 +559,29 @@ static int next_phys_dev(struct device *dev, const void *data)
        return 1;
 }
 
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+static int next_phys_dev(struct device *dev, const void *data)
+{
+       struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+       struct mlx5_core_dev *mdev = madev->mdev;
+
+       return _next_phys_dev(mdev, data);
+}
+
+static int next_phys_dev_lag(struct device *dev, const void *data)
+{
+       struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
+       struct mlx5_core_dev *mdev = madev->mdev;
+
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
+           !MLX5_CAP_GEN(mdev, lag_master) ||
+           MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS)
+               return 0;
+
+       return _next_phys_dev(mdev, data);
+}
+
+static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
+                                              int (*match)(struct device *dev, const void *data))
 {
        struct auxiliary_device *adev;
        struct mlx5_adev *madev;
@@ -571,7 +589,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
        if (!mlx5_core_is_pf(dev))
                return NULL;
 
-       adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
+       adev = auxiliary_find_device(NULL, dev, match);
        if (!adev)
                return NULL;
 
@@ -580,6 +598,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
        return madev->mdev;
 }
 
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+       lockdep_assert_held(&mlx5_intf_mutex);
+       return mlx5_get_next_dev(dev, &next_phys_dev);
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
+{
+       lockdep_assert_held(&mlx5_intf_mutex);
+       return mlx5_get_next_dev(dev, &next_phys_dev_lag);
+}
+
 void mlx5_dev_list_lock(void)
 {
        mutex_lock(&mlx5_intf_mutex);
index c19d9327095b2e034868742108a8a35ee9a57cc8..57d86d47ec2ab1ce113cbc3e5294de13c6476a26 100644 (file)
@@ -752,12 +752,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
        struct mlx5_lag *ldev = NULL;
        struct mlx5_core_dev *tmp_dev;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-           !MLX5_CAP_GEN(dev, lag_master) ||
-           MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
-               return 0;
-
-       tmp_dev = mlx5_get_next_phys_dev(dev);
+       tmp_dev = mlx5_get_next_phys_dev_lag(dev);
        if (tmp_dev)
                ldev = tmp_dev->priv.lag;
 
@@ -802,6 +797,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 {
        int err;
 
+       if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+           !MLX5_CAP_GEN(dev, lag_master) ||
+           MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
+               return;
+
 recheck:
        mlx5_dev_list_lock();
        err = __mlx5_lag_dev_add_mdev(dev);
index 230eab7e3bc91a1443cd67aa54885e9dd36fe795..3f3ea8d268ce4c1298df381280320c64420c514f 100644 (file)
@@ -186,6 +186,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev);
 int mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
 void mlx5_dev_list_lock(void);
 void mlx5_dev_list_unlock(void);
 int mlx5_dev_list_trylock(void);