RDMA/mlx5: Expose register c0 for RDMA device
Author:     Mark Bloch <mbloch@nvidia.com>
AuthorDate: Wed, 6 Dec 2023 14:01:38 +0000 (16:01 +0200)
Commit:     Leon Romanovsky <leon@kernel.org>
CommitDate: Tue, 12 Dec 2023 07:04:07 +0000 (09:04 +0200)
This patch improves matching of egress traffic sent by the local device. When
vport metadata matching is enabled, all egress traffic from the local vport is
tagged with the vport metadata value (register c0). This is particularly
useful for FDB steering.

The primary use case is sending traffic from the hypervisor to a VF. To
achieve this, open an SQ on the hypervisor and create a rule in the FDB that
matches on the eswitch manager vport and the SQN of that SQ, as sketched
below.
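
For illustration, a minimal kernel-side sketch of such a rule's match,
mirroring what the upstream eswitch offloads code does for its
send-to-vport rules; the helper name is hypothetical, while the
fte_match_set_misc fields and MLX5_SET()/MLX5_ADDR_OF() macros are the
upstream mlx5 ones:

  static void set_send_to_vport_match(struct mlx5_flow_spec *spec,
                                      u16 esw_manager_vport, u32 sqn)
  {
          void *misc;

          /* Match criteria: which fields participate in the match */
          misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                              misc_parameters);
          MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
          MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

          /* Match values: the eswitch manager vport and the SQ's number */
          misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                              misc_parameters);
          MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
          MLX5_SET(fte_match_set_misc, misc, source_port, esw_manager_vport);

          spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
  }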

The SQN can be obtained from the opened SQ, and the eswitch manager vport
match can be substituted with the register c0 value exposed by this patch.
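
A userspace sketch of obtaining the exposed value/mask through mlx5dv;
mlx5dv_query_device() is the standard rdma-core entry point, while the
reg_c0 field and MLX5DV_CONTEXT_MASK_REG_C0 flag are assumed from the
matching rdma-core change (verify against your rdma-core headers):

  #include <infiniband/mlx5dv.h>

  static int query_reg_c0(struct ibv_context *ctx,
                          uint32_t *value, uint32_t *mask)
  {
          struct mlx5dv_context attrs = {};

          attrs.comp_mask = MLX5DV_CONTEXT_MASK_REG_C0;  /* assumed flag */
          if (mlx5dv_query_device(ctx, &attrs))
                  return -1;
          if (!(attrs.comp_mask & MLX5DV_CONTEXT_MASK_REG_C0))
                  return -1;  /* kernel does not report reg_c0 */
          *value = attrs.reg_c0.value;  /* assumed field */
          *mask = attrs.reg_c0.mask;    /* assumed field */
          return 0;
  }

The returned value/mask pair can then replace the source_port match in the
sketch above with a metadata_reg_c_0 match in misc_parameters_2.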

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://lore.kernel.org/r/aa4120a91c98ff1c44f1213388c744d4cb0324d6.1701871118.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/mlx5/main.c
include/uapi/rdma/mlx5-abi.h

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 650a15b6cfbc3522d00fa8e38abd64a3b69f7b45..c2b557e642906fabb2bea74be8fc458d52d6f2a9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -818,6 +818,17 @@ static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
                                    MLX5_REG_NODE_DESC, 0, 0);
 }
 
+static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
+                               struct mlx5_ib_query_device_resp *resp)
+{
+       struct mlx5_eswitch *esw = mdev->priv.eswitch;
+       u16 vport = mlx5_eswitch_manager_vport(mdev);
+
+       resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
+                                                                     vport);
+       resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+}
+
 static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
@@ -1209,6 +1220,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                        MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
        }
 
+       if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
+               resp.response_length += sizeof(resp.reserved);
+
+       if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
+               struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+               resp.response_length += sizeof(resp.reg_c0);
+
+               if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
+                   mlx5_eswitch_vport_match_metadata_enabled(esw))
+                       fill_esw_mgr_reg_c0(mdev, &resp);
+       }
+
        if (uhw_outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a96b7d2770e15d10c57860f7b6c470b90b04d24a..d4f6a36dffb092e80125bea4a010be30baf4ab69 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -37,6 +37,7 @@
 #include <linux/types.h>
 #include <linux/if_ether.h>    /* For ETH_ALEN. */
 #include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
 
 enum {
        MLX5_QP_FLAG_SIGNATURE          = 1 << 0,
@@ -275,6 +276,7 @@ struct mlx5_ib_query_device_resp {
        __u32   tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
        struct  mlx5_ib_dci_streams_caps dci_streams_caps;
        __u16 reserved;
+       struct mlx5_ib_uapi_reg reg_c0;
 };
 
 enum mlx5_ib_create_cq_flags {