net/mlx5e: Create new flow attr for multi table actions
authorRoi Dayan <roid@nvidia.com>
Sun, 8 Aug 2021 06:38:03 +0000 (09:38 +0300)
committerSaeed Mahameed <saeedm@nvidia.com>
Thu, 17 Feb 2022 07:55:16 +0000 (23:55 -0800)
Some TC actions use post actions for their implementation,
for example the CT and sample actions.

Create a new flow attr after each multi table action and
create a post action rule for it.

The first flow attr is offloaded normally and linked to the next
attr (the post action rule) by setting an id on reg_c.
Each post action rule matches that id on reg_c and continues to the next one.

The flow counter is allocated on the last rule.

Signed-off-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h

index e600924e30ea24c014025725a1ca8bd8e3231053..cb8f7593a00ccc7d6741ebd7c2b05a3e38275328 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 
 #include "act.h"
+#include "en/tc/post_act.h"
 #include "en/tc_priv.h"
 #include "mlx5_core.h"
 
@@ -101,3 +102,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
        parse_state->num_actions = flow_action->num_entries;
        parse_state->extack = extack;
 }
+
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+                                 struct mlx5e_tc_flow_action *flow_action_reorder)
+{
+       struct flow_action_entry *act;
+       int i, j = 0;
+
+       flow_action_for_each(i, act, flow_action) {
+               /* Add CT action to be first. */
+               if (act->id == FLOW_ACTION_CT)
+                       flow_action_reorder->entries[j++] = act;
+       }
+
+       flow_action_for_each(i, act, flow_action) {
+               if (act->id == FLOW_ACTION_CT)
+                       continue;
+               flow_action_reorder->entries[j++] = act;
+       }
+}
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+                       struct flow_action *flow_action,
+                       struct mlx5_flow_attr *attr,
+                       enum mlx5_flow_namespace_type ns_type)
+{
+       struct flow_action_entry *act;
+       struct mlx5e_tc_act *tc_act;
+       struct mlx5e_priv *priv;
+       int err = 0, i;
+
+       priv = parse_state->flow->priv;
+
+       flow_action_for_each(i, act, flow_action) {
+               tc_act = mlx5e_tc_act_get(act->id, ns_type);
+               if (!tc_act || !tc_act->post_parse ||
+                   !tc_act->can_offload(parse_state, act, i, attr))
+                       continue;
+
+               err = tc_act->post_parse(parse_state, priv, attr);
+               if (err)
+                       goto out;
+       }
+
+out:
+       return err;
+}
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+                              struct mlx5_flow_attr *attr,
+                              struct mlx5_flow_attr *next_attr)
+{
+       struct mlx5_core_dev *mdev = flow->priv->mdev;
+       struct mlx5e_tc_mod_hdr_acts *mod_acts;
+       int err;
+
+       mod_acts = &attr->parse_attr->mod_hdr_acts;
+
+       /* Set handle on current post act rule to next post act rule. */
+       err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
+       if (err) {
+               mlx5_core_warn(mdev, "Failed setting post action handle");
+               return err;
+       }
+
+       attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                       MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+       return 0;
+}
index fc7c06688b5163bbbe80f7676933c8fb797eff07..519fa1056d9fbc88c7029cd3a829c19ed29ff19a 100644 (file)
@@ -42,6 +42,15 @@ struct mlx5e_tc_act {
        int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
                          struct mlx5e_priv *priv,
                          struct mlx5_flow_attr *attr);
+
+       bool (*is_multi_table_act)(struct mlx5e_priv *priv,
+                                  const struct flow_action_entry *act,
+                                  struct mlx5_flow_attr *attr);
+};
+
+struct mlx5e_tc_flow_action {
+       unsigned int num_entries;
+       struct flow_action_entry **entries;
 };
 
 extern struct mlx5e_tc_act mlx5e_tc_act_drop;
@@ -74,4 +83,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
                              struct flow_action *flow_action,
                              struct netlink_ext_ack *extack);
 
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+                                 struct mlx5e_tc_flow_action *flow_action_reorder);
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+                       struct flow_action *flow_action,
+                       struct mlx5_flow_attr *attr,
+                       enum mlx5_flow_namespace_type ns_type);
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+                              struct mlx5_flow_attr *attr,
+                              struct mlx5_flow_attr *next_attr);
+
 #endif /* __MLX5_EN_TC_ACT_H__ */
index ce7ba1951e256f77ec791457a1198c20e1ad9cf8..27854ac844a0cf73c716408a847921c5fdca6603 100644 (file)
@@ -140,15 +140,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
                goto err_xarray;
 
        handle->attr = post_attr;
-       err = mlx5e_tc_post_act_offload(post_act, handle);
-       if (err)
-               goto err_rule;
-
 
        return handle;
 
-err_rule:
-       xa_erase(&post_act->ids, handle->id);
 err_xarray:
        kfree(post_attr);
        kfree(handle);
index 32230e6770293838731047f394e880ffb60d10b2..90b86c4aaf518de634d959c8d79632e34b874f33 100644 (file)
@@ -533,6 +533,9 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
                        err = PTR_ERR(post_act_handle);
                        goto err_post_act;
                }
+               err = mlx5e_tc_post_act_offload(tc_psample->post_act, post_act_handle);
+               if (err)
+                       goto err_post_rule;
                sample_flow->post_act_handle = post_act_handle;
        } else {
                err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
index 0f4d3b9dd979d554c16c5450faccb066f5d2db15..228ebb19fcab62169984cc8e76238915bd63bb86 100644 (file)
@@ -1823,6 +1823,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
                ct_dbg("Failed to allocate post action handle");
                goto err_post_act_handle;
        }
+       err = mlx5e_tc_post_act_offload(ct_priv->post_act, handle);
+       if (err)
+               goto err_alloc_pre;
        ct_flow->post_act_handle = handle;
 
        /* Base flow attributes of both rules on original rule attribute */
index 9ffba584b982d593d114b5411a9c5c985aee5058..f76624699a8d1d3d6469a80ef5a278d0c8ebf949 100644 (file)
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
        struct completion init_done;
        struct completion del_hw_done;
        struct mlx5_flow_attr *attr;
+       struct list_head attrs;
 };
 
 struct mlx5_flow_handle *
@@ -129,6 +130,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
                           struct mlx5_flow_spec *spec,
                           struct mlx5_flow_attr *attr);
 
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
+
+void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
+int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
+
 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
 bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
 bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
index 1f8d339ff0c3f24c164d9b21fe717c49e1eac098..171bc6b36aa4005c4f27bdd24727c72c1ce1b6b5 100644 (file)
@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
        list_for_each_entry(flow, flow_list, tmp_list) {
                if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
                        continue;
-               attr = flow->attr;
-               esw_attr = attr->esw_attr;
-               spec = &attr->parse_attr->spec;
 
+               spec = &flow->attr->parse_attr->spec;
+
+               attr = mlx5e_tc_get_encap_attr(flow);
+               esw_attr = attr->esw_attr;
                esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
                esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
 
                /* Do not offload flows with unresolved neighbors */
                if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
                        continue;
+
+               err = mlx5e_tc_offload_flow_post_acts(flow);
+               if (err) {
+                       mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+                                      err);
+                       continue;
+               }
+
                /* update from slow path rule to encap rule */
-               rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+               rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
                if (IS_ERR(rule)) {
+                       mlx5e_tc_unoffload_flow_post_acts(flow);
                        err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
        list_for_each_entry(flow, flow_list, tmp_list) {
                if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
                        continue;
-               attr = flow->attr;
-               esw_attr = attr->esw_attr;
-               spec = &attr->parse_attr->spec;
+               spec = &flow->attr->parse_attr->spec;
 
                /* update from encap rule to slow path rule */
                rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+
+               attr = mlx5e_tc_get_encap_attr(flow);
+               esw_attr = attr->esw_attr;
                /* mark the flow's encap dest as non-valid */
                esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
 
@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                        continue;
                }
 
-               mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+               mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+               mlx5e_tc_unoffload_flow_post_acts(flow);
                flow->rule[0] = rule;
                /* was unset when fast path rule removed */
                flow_flag_set(flow, OFFLOADED);
@@ -495,6 +507,9 @@ void mlx5e_detach_encap(struct mlx5e_priv *priv,
        struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 
+       if (!mlx5e_is_eswitch_flow(flow))
+               return;
+
        if (attr->esw_attr->dests[out_index].flags &
            MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
                mlx5e_detach_encap_route(priv, flow, out_index);
@@ -1360,17 +1375,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
 
        list_for_each_entry(flow, encap_flows, tmp_list) {
                struct mlx5e_tc_flow_parse_attr *parse_attr;
-               struct mlx5_flow_attr *attr = flow->attr;
                struct mlx5_esw_flow_attr *esw_attr;
                struct mlx5_flow_handle *rule;
+               struct mlx5_flow_attr *attr;
                struct mlx5_flow_spec *spec;
 
                if (flow_flag_test(flow, FAILED))
                        continue;
 
+               spec = &flow->attr->parse_attr->spec;
+
+               attr = mlx5e_tc_get_encap_attr(flow);
                esw_attr = attr->esw_attr;
                parse_attr = attr->parse_attr;
-               spec = &parse_attr->spec;
 
                err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
                                             e->out_dev, e->route_dev_ifindex,
@@ -1392,9 +1409,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
                        esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
                        if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
                                goto offload_to_slow_path;
+
+                       err = mlx5e_tc_offload_flow_post_acts(flow);
+                       if (err) {
+                               mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+                                              err);
+                               goto offload_to_slow_path;
+                       }
+
                        /* update from slow path rule to encap rule */
-                       rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+                       rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
                        if (IS_ERR(rule)) {
+                               mlx5e_tc_unoffload_flow_post_acts(flow);
                                err = PTR_ERR(rule);
                                mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                               err);
index a709b2e9f3f2f519f1b0fccf64860cc1c65b1feb..b9d6a2e8b2400ca211ba1d074efc27e3c0c5ab18 100644 (file)
@@ -115,6 +115,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
 static struct lock_class_key tc_ht_lock_key;
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
 
 void
 mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -273,6 +274,23 @@ get_sample_priv(struct mlx5e_priv *priv)
        return NULL;
 }
 
+static struct mlx5e_post_act *
+get_post_action(struct mlx5e_priv *priv)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_rep_uplink_priv *uplink_priv;
+       struct mlx5e_rep_priv *uplink_rpriv;
+
+       if (is_mdev_switchdev_mode(priv->mdev)) {
+               uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+               uplink_priv = &uplink_rpriv->uplink_priv;
+
+               return uplink_priv->post_act;
+       }
+
+       return priv->fs.tc.post_act;
+}
+
 struct mlx5_flow_handle *
 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
                    struct mlx5_flow_spec *spec,
@@ -1193,6 +1211,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
        if (flow_flag_test(flow, HAIRPIN))
                mlx5e_hairpin_flow_del(priv, flow);
 
+       free_flow_post_acts(flow);
+
        kvfree(attr->parse_attr);
        kfree(flow->attr);
 }
@@ -1425,6 +1445,9 @@ set_encap_dests(struct mlx5e_priv *priv,
        int out_index;
        int err = 0;
 
+       if (!mlx5e_is_eswitch_flow(flow))
+               return 0;
+
        parse_attr = attr->parse_attr;
        esw_attr = attr->esw_attr;
        *vf_tun = false;
@@ -1480,6 +1503,9 @@ clean_encap_dests(struct mlx5e_priv *priv,
        struct mlx5_esw_flow_attr *esw_attr;
        int out_index;
 
+       if (!mlx5e_is_eswitch_flow(flow))
+               return;
+
        esw_attr = attr->esw_attr;
        *vf_tun = false;
 
@@ -1627,7 +1653,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
         * (1) there's no error
         * (2) there's an encap action and we don't have valid neigh
         */
-       if (!encap_valid)
+       if (!encap_valid || flow_flag_test(flow, SLOW))
                flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
        else
                flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -1712,6 +1738,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        if (flow_flag_test(flow, L3_TO_L2_DECAP))
                mlx5e_detach_decap(priv, flow);
 
+       free_flow_post_acts(flow);
+
        kvfree(attr->esw_attr->rx_tun_attr);
        kvfree(attr->parse_attr);
        kfree(flow->attr);
@@ -1719,7 +1747,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 
 struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
 {
-       return flow->attr->counter;
+       struct mlx5_flow_attr *attr;
+
+       attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
+       return attr->counter;
 }
 
 /* Iterate over tmp_list of flows attached to flow_list head. */
@@ -3316,52 +3347,6 @@ bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
        return (fsystem_guid == psystem_guid);
 }
 
-static int
-parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
-                struct flow_action *flow_action)
-{
-       struct netlink_ext_ack *extack = parse_state->extack;
-       struct mlx5e_tc_flow *flow = parse_state->flow;
-       struct mlx5_flow_attr *attr = flow->attr;
-       enum mlx5_flow_namespace_type ns_type;
-       struct mlx5e_priv *priv = flow->priv;
-       const struct flow_action_entry *act;
-       struct mlx5e_tc_act *tc_act;
-       int err, i;
-
-       ns_type = mlx5e_get_flow_namespace(flow);
-
-       flow_action_for_each(i, act, flow_action) {
-               tc_act = mlx5e_tc_act_get(act->id, ns_type);
-               if (!tc_act) {
-                       NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
-                       return -EOPNOTSUPP;
-               }
-
-               if (!tc_act->can_offload(parse_state, act, i, attr))
-                       return -EOPNOTSUPP;
-
-               err = tc_act->parse_action(parse_state, act, priv, attr);
-               if (err)
-                       return err;
-
-               parse_state->actions |= attr->action;
-       }
-
-       flow_action_for_each(i, act, flow_action) {
-               tc_act = mlx5e_tc_act_get(act->id, ns_type);
-               if (!tc_act || !tc_act->post_parse ||
-                   !tc_act->can_offload(parse_state, act, i, attr))
-                       continue;
-
-               err = tc_act->post_parse(parse_state, priv, attr);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
 static int
 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
@@ -3400,6 +3385,300 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
        return 0;
 }
 
+static struct mlx5_flow_attr*
+mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
+                                  enum mlx5_flow_namespace_type ns_type)
+{
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       u32 attr_sz = ns_to_attr_sz(ns_type);
+       struct mlx5_flow_attr *attr2;
+
+       attr2 = mlx5_alloc_flow_attr(ns_type);
+       parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+       if (!attr2 || !parse_attr) {
+               kvfree(parse_attr);
+               kfree(attr2);
+               return attr2;
+       }
+
+       memcpy(attr2, attr, attr_sz);
+       INIT_LIST_HEAD(&attr2->list);
+       parse_attr->filter_dev = attr->parse_attr->filter_dev;
+       attr2->action = 0;
+       attr2->flags = 0;
+       attr2->parse_attr = parse_attr;
+       return attr2;
+}
+
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+       return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_esw_flow_attr *esw_attr;
+       struct mlx5_flow_attr *attr;
+       int i;
+
+       list_for_each_entry(attr, &flow->attrs, list) {
+               esw_attr = attr->esw_attr;
+               for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+                       if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
+                               return attr;
+               }
+       }
+
+       return NULL;
+}
+
+void
+mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+       struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+       struct mlx5_flow_attr *attr;
+
+       list_for_each_entry(attr, &flow->attrs, list) {
+               if (list_is_last(&attr->list, &flow->attrs))
+                       break;
+
+               mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
+       }
+}
+
+static void
+free_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+       struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+       struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+       struct mlx5_flow_attr *attr, *tmp;
+       bool vf_tun;
+
+       list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
+               if (list_is_last(&attr->list, &flow->attrs))
+                       break;
+
+               if (attr->post_act_handle)
+                       mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
+
+               clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+                       mlx5_fc_destroy(counter_dev, attr->counter);
+
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+                       mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+                       if (attr->modify_hdr)
+                               mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+               }
+
+               list_del(&attr->list);
+               kvfree(attr->parse_attr);
+               kfree(attr);
+       }
+}
+
+int
+mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+       struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+       struct mlx5_flow_attr *attr;
+       int err = 0;
+
+       list_for_each_entry(attr, &flow->attrs, list) {
+               if (list_is_last(&attr->list, &flow->attrs))
+                       break;
+
+               err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+/* TC filter rule HW translation:
+ *
+ * +---------------------+
+ * + ft prio (tc chain)  +
+ * + original match      +
+ * +---------------------+
+ *           |
+ *           | if multi table action
+ *           |
+ *           v
+ * +---------------------+
+ * + post act ft         |<----.
+ * + match fte id        |     | split on multi table action
+ * + do actions          |-----'
+ * +---------------------+
+ *           |
+ *           |
+ *           v
+ * Do rest of the actions after last multi table action.
+ */
+static int
+alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+{
+       struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+       struct mlx5_flow_attr *attr, *next_attr = NULL;
+       struct mlx5e_post_act_handle *handle;
+       bool vf_tun, encap_valid = true;
+       int err;
+
+       /* This is going in reverse order as needed.
+        * The first entry is the last attribute.
+        */
+       list_for_each_entry(attr, &flow->attrs, list) {
+               if (!next_attr) {
+                       /* Set counter action on last post act rule. */
+                       attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+               } else {
+                       attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                       err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
+                       if (err)
+                               goto out_free;
+               }
+
+               /* Don't add post_act rule for first attr (last in the list).
+                * It's being handled by the caller.
+                */
+               if (list_is_last(&attr->list, &flow->attrs))
+                       break;
+
+               err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+               if (err)
+                       goto out_free;
+
+               if (!encap_valid)
+                       flow_flag_set(flow, SLOW);
+
+               err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+               if (err)
+                       goto out_free;
+
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+                       err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+                       if (err)
+                               goto out_free;
+               }
+
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+                       err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+                       if (err)
+                               goto out_free;
+               }
+
+               handle = mlx5e_tc_post_act_add(post_act, attr);
+               if (IS_ERR(handle)) {
+                       err = PTR_ERR(handle);
+                       goto out_free;
+               }
+
+               attr->post_act_handle = handle;
+               next_attr = attr;
+       }
+
+       if (flow_flag_test(flow, SLOW))
+               goto out;
+
+       err = mlx5e_tc_offload_flow_post_acts(flow);
+       if (err)
+               goto out_free;
+
+out:
+       return 0;
+
+out_free:
+       free_flow_post_acts(flow);
+       return err;
+}
+
+static int
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+                struct flow_action *flow_action)
+{
+       struct netlink_ext_ack *extack = parse_state->extack;
+       struct mlx5e_tc_flow_action flow_action_reorder;
+       struct mlx5e_tc_flow *flow = parse_state->flow;
+       struct mlx5_flow_attr *attr = flow->attr;
+       enum mlx5_flow_namespace_type ns_type;
+       struct mlx5e_priv *priv = flow->priv;
+       struct flow_action_entry *act, **_act;
+       struct mlx5e_tc_act *tc_act;
+       int err, i;
+
+       flow_action_reorder.num_entries = flow_action->num_entries;
+       flow_action_reorder.entries = kcalloc(flow_action->num_entries,
+                                             sizeof(flow_action), GFP_KERNEL);
+       if (!flow_action_reorder.entries)
+               return -ENOMEM;
+
+       mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
+
+       ns_type = mlx5e_get_flow_namespace(flow);
+       list_add(&attr->list, &flow->attrs);
+
+       flow_action_for_each(i, _act, &flow_action_reorder) {
+               act = *_act;
+               tc_act = mlx5e_tc_act_get(act->id, ns_type);
+               if (!tc_act) {
+                       NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+                       err = -EOPNOTSUPP;
+                       goto out_free;
+               }
+
+               if (!tc_act->can_offload(parse_state, act, i, attr)) {
+                       err = -EOPNOTSUPP;
+                       goto out_free;
+               }
+
+               err = tc_act->parse_action(parse_state, act, priv, attr);
+               if (err)
+                       goto out_free;
+
+               parse_state->actions |= attr->action;
+
+               /* Split attr for multi table act if not the last act. */
+               if (tc_act->is_multi_table_act &&
+                   tc_act->is_multi_table_act(priv, act, attr) &&
+                   i < flow_action_reorder.num_entries - 1) {
+                       err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+                       if (err)
+                               goto out_free;
+
+                       attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
+                       if (!attr) {
+                               err = -ENOMEM;
+                               goto out_free;
+                       }
+
+                       list_add(&attr->list, &flow->attrs);
+               }
+       }
+
+       kfree(flow_action_reorder.entries);
+
+       err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+       if (err)
+               goto out_free_post_acts;
+
+       err = alloc_flow_post_acts(flow, extack);
+       if (err)
+               goto out_free_post_acts;
+
+       return 0;
+
+out_free:
+       kfree(flow_action_reorder.entries);
+out_free_post_acts:
+       free_flow_post_acts(flow);
+
+       return err;
+}
+
 static int
 flow_action_supported(struct flow_action *flow_action,
                      struct netlink_ext_ack *extack)
@@ -3652,7 +3931,12 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
                                sizeof(struct mlx5_nic_flow_attr);
        struct mlx5_flow_attr *attr;
 
-       return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+       attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+       if (!attr)
+               return attr;
+
+       INIT_LIST_HEAD(&attr->list);
+       return attr;
 }
 
 static int
@@ -3686,6 +3970,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
                INIT_LIST_HEAD(&flow->encaps[out_index].list);
        INIT_LIST_HEAD(&flow->hairpin);
        INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
+       INIT_LIST_HEAD(&flow->attrs);
        refcount_set(&flow->refcnt, 1);
        init_completion(&flow->init_done);
        init_completion(&flow->del_hw_done);
index 533c897bd517836860eb27f896268fd198de5d53..a80b00946f1b011dc29357a51471777065375223 100644 (file)
@@ -53,7 +53,6 @@
                            ESW_FLOW_ATTR_SZ :\
                            NIC_FLOW_ATTR_SZ)
 
-
 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
 
 struct mlx5e_tc_update_priv {
@@ -84,6 +83,8 @@ struct mlx5_flow_attr {
        u8 tun_ip_version;
        int tunnel_id; /* mapped tunnel id */
        u32 flags;
+       struct list_head list;
+       struct mlx5e_post_act_handle *post_act_handle;
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];