net/mlx5e: Rename lro_timeout to packet_merge_timeout
authorBen Ben-Ishay <benishay@nvidia.com>
Thu, 2 Jul 2020 14:22:45 +0000 (17:22 +0300)
committerSaeed Mahameed <saeedm@nvidia.com>
Wed, 27 Oct 2021 02:30:38 +0000 (19:30 -0700)
TIR stands for transport interface receive; the TIR object is
responsible for performing all transport-related operations on
the receive side, such as packet processing and demultiplexing
the packets to different RQs.
lro_timeout is a field in the TIR that is used to set the timeout for an
LRO session. This series introduces a new packet merge type; therefore,
rename lro_timeout to packet_merge_timeout to cover all packet merge types.

Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
include/linux/mlx5/mlx5_ifc.h

index a3a4fece0cac31754d23dc56f58550d59bfd3858..26e3f413386a7eacdbf907e6992ab88761173b6c 100644 (file)
@@ -265,7 +265,7 @@ struct mlx5e_params {
        bool scatter_fcs_en;
        bool rx_dim_enabled;
        bool tx_dim_enabled;
-       u32 lro_timeout;
+       u32 packet_merge_timeout;
        u32 pflags;
        struct bpf_prog *xdp_prog;
        struct mlx5e_xsk *xsk;
index 3cbb596821e8910d934e3c71d6d0ba48726e62e7..2b2b3c5cdbd5c383ddcedfea9de935aca2732fea 100644 (file)
@@ -173,7 +173,7 @@ struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
 
        lro_param = (struct mlx5e_lro_param) {
                .enabled = params->lro_en,
-               .timeout = params->lro_timeout,
+               .timeout = params->packet_merge_timeout,
        };
 
        return lro_param;
index de936dc4bc4832123bc0edb4cf0afc66af3fcbb7..857ea097915970afc4c8003aa6271e032a54adc3 100644 (file)
@@ -82,9 +82,9 @@ void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
        if (!lro_param->enabled)
                return;
 
-       MLX5_SET(tirc, tirc, lro_enable_mask,
-                MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-                MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+       MLX5_SET(tirc, tirc, packet_merge_mask,
+                MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
+                MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
index f3dec58026d945ea6ed0489b0dec9f20251b322c..0e7a8afeb9bd63f6cdd730f07d06961fc14e41a8 100644 (file)
@@ -4404,7 +4404,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
                if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
                        params->lro_en = !slow_pci_heuristic(mdev);
        }
-       params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+       params->packet_merge_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
        /* CQ moderation params */
        rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
index 746381eccccf265e2b4edf7ebd060f30963b0e71..21c0fd478afac9705a73fb491076e6c91a857b4a 100644 (file)
@@ -3361,8 +3361,8 @@ enum {
 };
 
 enum {
-       MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
-       MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+       MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO  = BIT(0),
+       MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO  = BIT(1),
 };
 
 enum {
@@ -3387,7 +3387,7 @@ struct mlx5_ifc_tirc_bits {
 
        u8         reserved_at_80[0x4];
        u8         lro_timeout_period_usecs[0x10];
-       u8         lro_enable_mask[0x4];
+       u8         packet_merge_mask[0x4];
        u8         lro_max_ip_payload_size[0x8];
 
        u8         reserved_at_a0[0x40];