        bool scatter_fcs_en;
        bool rx_dim_enabled;
        bool tx_dim_enabled;
-       u32 lro_timeout;
+       u32 packet_merge_timeout;
        u32 pflags;
        struct bpf_prog *xdp_prog;
        struct mlx5e_xsk *xsk;
 
 
        lro_param = (struct mlx5e_lro_param) {
                .enabled = params->lro_en,
-               .timeout = params->lro_timeout,
+               .timeout = params->packet_merge_timeout,
        };
 
        return lro_param;
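For context, a minimal usage sketch of the helper above (the call site and the builder name here are placeholders, not part of this patch): the parameters are packed into struct mlx5e_lro_param and handed to the TIR-context builder shown in the next hunk.

	/* Hypothetical call site; only mlx5e_get_lro_param() is named by this
	 * patch.  build_tir_lro_ctx() is a placeholder for the routine in the
	 * next hunk, which returns early when lro_param->enabled is false.
	 */
	struct mlx5e_lro_param lro_param = mlx5e_get_lro_param(params);

	build_tir_lro_ctx(tirc, &lro_param);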
 
        if (!lro_param->enabled)
                return;
 
-       MLX5_SET(tirc, tirc, lro_enable_mask,
-                MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-                MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+       MLX5_SET(tirc, tirc, packet_merge_mask,
+                MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
+                MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
        MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
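Side note on the accessor used above (general mlx5 convention, not introduced here): MLX5_SET() resolves the field's offset and width from the mlx5_ifc layout at compile time, so renaming lro_enable_mask to packet_merge_mask changes only the identifier, not the bits written into the TIR context. A standalone sketch, assuming the usual MLX5_ST_SZ_DW() sizing macro:

	u32 tirc[MLX5_ST_SZ_DW(tirc)] = {};

	/* Enable merging for both IPv4 and IPv6, as the hunk above does. */
	MLX5_SET(tirc, tirc, packet_merge_mask,
		 MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
		 MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);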
 
                if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
                        params->lro_en = !slow_pci_heuristic(mdev);
        }
-       params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+       params->packet_merge_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
        /* CQ moderation params */
        rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
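The timeout assigned above comes from mlx5e_choose_lro_timeout(), which picks a device-supported period rather than using the requested value directly. A simplified paraphrase, with the capability macro and array-size name assumed to match the driver:

	static u32 choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted)
	{
		int i;

		/* Supported periods are reported by firmware in ascending order;
		 * take the first one that covers the wanted timeout.
		 */
		for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
			if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted)
				break;

		return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
	}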
 
 };
 
 enum {
-       MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
-       MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+       MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO  = BIT(0),
+       MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO  = BIT(1),
 };
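The switch from the 0x1/0x2 literals to BIT(0)/BIT(1) is value-preserving; an illustrative compile-time check (not part of the patch):

	static_assert(MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO == 0x1);
	static_assert(MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO == 0x2);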
 
 enum {
 
        u8         reserved_at_80[0x4];
        u8         lro_timeout_period_usecs[0x10];
-       u8         lro_enable_mask[0x4];
+       u8         packet_merge_mask[0x4];
        u8         lro_max_ip_payload_size[0x8];
 
        u8         reserved_at_a0[0x40];
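Layout check from the widths above (no new information): starting at bit offset 0x80, the fields occupy 0x4 + 0x10 + 0x4 + 0x8 = 0x20 bits, so the following reserved field correctly begins at 0xa0. The rename to packet_merge_mask keeps the 4-bit field at the same position in struct mlx5_ifc_tirc_bits.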