}
}
+ if (field_avail(typeof(resp), tunnel_offloads_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ }
+
if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			return err;
	}
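/*
 * Illustrative sketch, not part of the patch: the field_avail() test above
 * gates new response fields on the output length userspace supplied, so an
 * old binary with a short response struct never receives bytes it cannot
 * hold. A minimal userspace model of the idiom follows; the macro body is
 * assumed to match the driver's local helper, and resp_model is a cut-down
 * stand-in for struct mlx5_ib_query_device_resp.
 */
#include <stddef.h>
#include <stdio.h>

#define field_avail(type, fld, sz) \
	(offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz))

struct resp_model {
	unsigned int comp_mask;
	unsigned int response_length;
	unsigned int tunnel_offloads_caps;	/* the field added by this patch */
};

int main(void)
{
	/* An old consumer sized its buffer without the new field... */
	size_t old_outlen = offsetof(struct resp_model, tunnel_offloads_caps);

	/* ...so the new capability word is reported only to new consumers. */
	printf("old consumer sees caps: %d\n",
	       field_avail(struct resp_model, tunnel_offloads_caps, old_outlen));
	printf("new consumer sees caps: %d\n",
	       field_avail(struct resp_model, tunnel_offloads_caps,
			   sizeof(struct resp_model)));
	return 0;
}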
+static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, u32 tdn)
+ struct mlx5_ib_rq *rq, u32 tdn,
+ bool tunnel_offload_en)
{
u32 *in;
void *tirc;
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (tunnel_offload_en)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);

	kvfree(in);

	return err;
}

		err = create_raw_packet_qp_rq(dev, rq, in);
		if (err)
			goto err_destroy_sq;
- err = create_raw_packet_qp_tir(dev, rq, tdn);
+ err = create_raw_packet_qp_tir(dev, rq, tdn,
+ qp->tunnel_offload_en);
if (err)
goto err_destroy_rq;
}
if (udata->outlen < min_resp_len)
return -EINVAL;
- required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
	if (udata->inlen < required_cmd_sz) {
		mlx5_ib_dbg(dev, "invalid inlen\n");
		return -EINVAL;
	}

	if (ucmd.comp_mask) {
		mlx5_ib_dbg(dev, "invalid comp mask\n");
		return -EOPNOTSUPP;
	}
- if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
+ if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ mlx5_ib_dbg(dev, "invalid flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
+ !tunnel_offload_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
return -EOPNOTSUPP;
}
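/*
 * Illustrative note, not part of the patch: turning reserved1 into flags is
 * ABI-safe because existing userspace had to pass reserved1 == 0, which now
 * simply means "no flags requested"; only genuinely unknown bits are
 * rejected. A minimal model of the two checks above, with tunnel_supported
 * standing in for tunnel_offload_supported(dev->mdev):
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#define MLX5_QP_FLAG_TUNNEL_OFFLOADS (1 << 2)

static int validate_rss_flags(unsigned int flags, bool tunnel_supported)
{
	if (flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS)
		return -EOPNOTSUPP;	/* unknown future flag bits */
	if ((flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) && !tunnel_supported)
		return -EOPNOTSUPP;	/* no stateless tunnel caps in HW */
	return 0;
}

int main(void)
{
	assert(validate_rss_flags(0, false) == 0);
	assert(validate_rss_flags(MLX5_QP_FLAG_TUNNEL_OFFLOADS, true) == 0);
	assert(validate_rss_flags(MLX5_QP_FLAG_TUNNEL_OFFLOADS, false) == -EOPNOTSUPP);
	assert(validate_rss_flags(1u << 5, true) == -EOPNOTSUPP);
	return 0;
}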
MLX5_SET(tirc, tirc, transport_domain, tdn);
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
switch (ucmd.rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
+ !tunnel_offload_supported(mdev)) {
+ mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->tunnel_offload_en = true;
+ }
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
};
enum {
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
};
+enum mlx5_ib_tunnel_offloads {
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
+ MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2
+};
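/*
 * Illustrative sketch, not part of the patch: decoding the capability word
 * that mlx5_ib_query_device() now fills in. The bit values mirror the enum
 * above; the caps value here is only a sample, in a real application it
 * would come from the vendor part of an extended query-device response.
 */
#include <stdio.h>

enum {
	TUN_VXLAN  = 1 << 0,	/* MLX5_IB_TUNNELED_OFFLOADS_VXLAN */
	TUN_GRE    = 1 << 1,	/* MLX5_IB_TUNNELED_OFFLOADS_GRE */
	TUN_GENEVE = 1 << 2	/* MLX5_IB_TUNNELED_OFFLOADS_GENEVE */
};

int main(void)
{
	unsigned int caps = TUN_VXLAN | TUN_GRE;	/* sample value */

	printf("VXLAN:  %s\n", (caps & TUN_VXLAN) ? "yes" : "no");
	printf("GRE:    %s\n", (caps & TUN_GRE) ? "yes" : "no");
	printf("GENEVE: %s\n", (caps & TUN_GENEVE) ? "yes" : "no");
	return 0;
}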
+
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
__u32 flags; /* Use enum mlx5_ib_query_dev_resp_flags */
struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
struct mlx5_ib_striding_rq_caps striding_rq_caps;
+ __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
+ __u32 reserved;
};
struct mlx5_ib_create_qp_rss {
__u8 reserved[6];
__u8 rx_hash_key[128]; /* valid only for Toeplitz */
__u32 comp_mask;
- __u32 reserved1;
+ __u32 flags;
};
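/*
 * Illustrative sketch, not part of the patch: the userspace half of the
 * contract. The leading fields of struct mlx5_ib_create_qp_rss are elided in
 * this cut-down model; in practice the struct travels as provider-private
 * udata inside an extended create-QP call issued through the rdma-core mlx5
 * provider, never filled by hand as below.
 */
#include <stdio.h>
#include <string.h>

#define MLX5_QP_FLAG_TUNNEL_OFFLOADS (1 << 2)

struct create_qp_rss_model {		/* stand-in for mlx5_ib_create_qp_rss */
	unsigned char reserved[6];
	unsigned char rx_hash_key[128];
	unsigned int comp_mask;
	unsigned int flags;
};

int main(void)
{
	struct create_qp_rss_model ucmd;

	memset(&ucmd, 0, sizeof(ucmd));	/* reserved fields must stay zero */
	ucmd.flags = MLX5_QP_FLAG_TUNNEL_OFFLOADS;
	printf("requesting tunnel offloads: flags=0x%x\n", ucmd.flags);
	return 0;
}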
struct mlx5_ib_create_qp_resp {