net/mlx5e: Modify slow path rules to go to slow fdb
author Vlad Buslov <vladbu@nvidia.com>
Thu, 7 Jul 2022 19:49:18 +0000 (21:49 +0200)
committer Saeed Mahameed <saeedm@nvidia.com>
Thu, 28 Jul 2022 20:44:32 +0000 (13:44 -0700)
While extending the available range of supported chains/prios, the
referenced commit also modified slow path rules to go to the FT chain
instead of the actual slow FDB. However, neither of the existing users of
the MLX5_ATTR_FLAG_SLOW_PATH flag (tunnel encap entries with invalid encap
and flows with trap action) needs to match on the FT chain. Moreover, since
bridge offload was implemented, packets of such flows can also be matched
by the bridge priority tables, which is undesirable. Restore the slow path
flow implementation so that packets are redirected to the slow_fdb.
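
For illustration, a stand-alone, simplified C sketch of the new dispatch
(not driver code: the flag names and the intended destinations come from
this patch, but the bit values, types and helpers below are hypothetical):

  #include <stdio.h>

  /* Flag names as in the driver; the bit values here are illustrative only. */
  #define MLX5_ATTR_FLAG_SLOW_PATH (1u << 1)
  #define MLX5_ATTR_FLAG_ACCEPT    (1u << 2)

  /* Hypothetical stand-ins for the real flow table destinations. */
  enum dest { DEST_SLOW_FDB, DEST_TC_END_FT, DEST_OTHER };

  /* Models the order of checks in esw_setup_dests() after this patch:
   * slow path flows are sent straight to the slow FDB, while "accept"
   * flows keep terminating at the TC end flow table.
   */
  static enum dest pick_dest(unsigned int attr_flags)
  {
          if (attr_flags & MLX5_ATTR_FLAG_SLOW_PATH)
                  return DEST_SLOW_FDB;
          if (attr_flags & MLX5_ATTR_FLAG_ACCEPT)
                  return DEST_TC_END_FT;
          return DEST_OTHER;
  }

  int main(void)
  {
          printf("slow path -> %s\n",
                 pick_dest(MLX5_ATTR_FLAG_SLOW_PATH) == DEST_SLOW_FDB ?
                 "slow FDB" : "other");
          printf("accept    -> %s\n",
                 pick_dest(MLX5_ATTR_FLAG_ACCEPT) == DEST_TC_END_FT ?
                 "TC end FT" : "other");
          return 0;
  }

Before this patch both flags were folded together by
mlx5e_tc_attr_flags_skip() and resolved to the TC end FT; afterwards only
the MLX5_ATTR_FLAG_ACCEPT case does, as the diff below shows.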

Fixes: 278d51f24330 ("net/mlx5: E-Switch, Increase number of chains and priorities")
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

index 2ce3728576d1a29aaf562f6456cb5d1890e0db34..eb79810199d3e10a394bd42b876cb899125586a4 100644
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 }
 
 static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
-                        struct mlx5_flow_act *flow_act,
-                        struct mlx5_fs_chains *chains,
-                        int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+                     struct mlx5_fs_chains *chains, int i)
 {
        if (mlx5_chains_ignore_flow_level_supported(chains))
                flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
 
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+                        struct mlx5_eswitch *esw, int i)
+{
+       if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+               flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+       dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
 static int
 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
                     struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
        } else if (attr->dest_ft) {
                esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
                (*i)++;
-       } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
-               esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+       } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+               esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+               (*i)++;
+       } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+               esw_setup_accept_dest(dest, flow_act, chains, *i);
                (*i)++;
        } else if (attr->dest_chain) {
                err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,