MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
 };
 
+#define MLX5E_TC_MAX_SPLITS 1
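+/* With mirroring, an offloaded TC flow is split into at most
+ * MLX5E_TC_MAX_SPLITS + 1 hardware rules: rule[0] is the forwarding
+ * rule, rule[1] the mirror rule.
+ */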
+
 struct mlx5e_tc_flow {
        struct rhash_head       node;
        struct mlx5e_priv       *priv;
        u64                     cookie;
        u8                      flags;
-       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
        struct list_head        encap;   /* flows sharing the same encap ID */
        struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
        struct list_head        hairpin; /* flows sharing the same hairpin */
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;
 
-       counter = mlx5_flow_rule_counter(flow->rule);
-       mlx5_del_flow_rules(flow->rule);
+       counter = mlx5_flow_rule_counter(flow->rule[0]);
+       mlx5_del_flow_rules(flow->rule[0]);
        mlx5_fc_destroy(priv->mdev, counter);
 
        if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
                rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
                if (IS_ERR(rule))
                        goto err_add_rule;
+
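+               /* Split flow: the rule just added went to the fwd FDB and
+                * does the forwarding; add the mirror rule in the fast FDB
+                * that replicates the packet and chains to the fwd FDB.
+                */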
+               if (attr->mirror_count) {
+                       flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
+                       if (IS_ERR(flow->rule[1]))
+                               goto err_fwd_rule;
+               }
        }
        return rule;
 
+err_fwd_rule:
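+       /* Adding the mirror rule failed: remove the forwarding rule that
+        * was already installed and return the error in flow->rule[1].
+        */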
+       mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+       rule = flow->rule[1];
 err_add_rule:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
 
        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-               mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
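+               /* Delete the mirror rule before the forwarding rule it
+                * chains to.
+                */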
+               if (attr->mirror_count)
+                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
        }
 
        mlx5_eswitch_del_vlan_action(esw, attr);
        list_for_each_entry(flow, &e->flows, encap) {
                esw_attr = flow->esw_attr;
                esw_attr->encap_id = e->encap_id;
-               flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
-               if (IS_ERR(flow->rule)) {
-                       err = PTR_ERR(flow->rule);
+               flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+               if (IS_ERR(flow->rule[0])) {
+                       err = PTR_ERR(flow->rule[0]);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }
+
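+               /* Re-add the mirror half of split flows as well; undo the
+                * forwarding rule if the mirror rule cannot be restored.
+                */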
+               if (esw_attr->mirror_count) {
+                       flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+                       if (IS_ERR(flow->rule[1])) {
+                               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
+                               err = PTR_ERR(flow->rule[1]);
+                               mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
+                                              err);
+                               continue;
+                       }
+               }
+
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        }
 }
 
        list_for_each_entry(flow, &e->flows, encap) {
                if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+                       struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
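+                       /* Mirror rule first, then the forwarding rule it
+                        * chains to.
+                        */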
+                       if (attr->mirror_count)
+                               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
                }
        }
 
                        continue;
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-                               counter = mlx5_flow_rule_counter(flow->rule);
+                               counter = mlx5_flow_rule_counter(flow->rule[0]);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
                        goto err_free;
-               flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+               flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
                if (err < 0)
                        goto err_free;
-               flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+               flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
        }
 
-       if (IS_ERR(flow->rule)) {
-               err = PTR_ERR(flow->rule);
+       if (IS_ERR(flow->rule[0])) {
+               err = PTR_ERR(flow->rule[0]);
                if (err != -EAGAIN)
                        goto err_free;
        }
        if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
                return 0;
 
-       counter = mlx5_flow_rule_counter(flow->rule);
+       counter = mlx5_flow_rule_counter(flow->rule[0]);
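+       /* Stats come from rule[0]: the counter is attached to the
+        * forwarding rule, the mirror rule has no counter of its own.
+        */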
        if (!counter)
                return 0;
 
 
 {
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = {0};
+       struct mlx5_flow_table *ft = NULL;
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        int j, i = 0;
        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);
 
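+       /* The forwarding half of a split (mirrored) flow goes into the
+        * fwd FDB; the mirror rule in the fast FDB chains to it.
+        */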
+       if (attr->mirror_count)
+               ft = esw->fdb_table.offloads.fwd_fdb;
+       else
+               ft = esw->fdb_table.offloads.fast_fdb;
+
        flow_act.action = attr->action;
        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
        if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
                for (j = attr->mirror_count; j < attr->out_count; j++) {
                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                        dest[i].vport.num = attr->out_rep[j]->vport;
-                       if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
-                               dest[i].vport.vhca_id =
-                                       MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
-                               dest[i].vport.vhca_id_valid = 1;
-                       }
+                       dest[i].vport.vhca_id =
+                               MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+                       dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
                        i++;
                }
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                flow_act.encap_id = attr->encap_id;
 
-       rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.offloads.fast_fdb,
-                                  spec, &flow_act, dest, i);
+       rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                esw->offloads.num_flows++;
 
        return rule;
 }
 
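+/* Add the mirror half of a split flow: a rule in the fast FDB that
+ * replicates the packet to each mirror vport and then forwards it to
+ * the fwd FDB, where the flow's forwarding rule lives.
+ */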
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+                         struct mlx5_flow_spec *spec,
+                         struct mlx5_esw_flow_attr *attr)
+{
+       struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
+       struct mlx5_flow_act flow_act = {0};
+       struct mlx5_flow_handle *rule;
+       void *misc;
+       int i;
+
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
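+       /* One vport destination per mirror port, then a flow-table
+        * destination chaining to the fwd FDB.
+        */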
+       for (i = 0; i < attr->mirror_count; i++) {
+               dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+               dest[i].vport.num = attr->out_rep[i]->vport;
+               dest[i].vport.vhca_id =
+                       MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+               dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+       }
+       dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest[i].ft = esw->fdb_table.offloads.fwd_fdb;
+       i++;
+
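+       /* Pin the rule to traffic from this flow's ingress vport (and its
+        * owning vhca_id on merged-eswitch devices).
+        */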
+       misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+       MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+               MLX5_SET(fte_match_set_misc, misc,
+                        source_eswitch_owner_vhca_id,
+                        MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
+       misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+       MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+               MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+                                source_eswitch_owner_vhca_id);
+
+       if (attr->match_level == MLX5_MATCH_NONE)
+               spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+       else
+               spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+                                             MLX5_MATCH_MISC_PARAMETERS;
+
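+       /* The mirror rule itself is installed in the fast FDB. */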
+       rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+
+       if (!IS_ERR(rule))
+               esw->offloads.num_flows++;
+
+       return rule;
+}
+
 void
 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,