struct mlx5_ct_zone_rule {
        struct mlx5_flow_handle *rule;
        struct mlx5_esw_flow_attr attr;
-       int tupleid;
        bool nat;
 };
 
 struct mlx5_ct_entry {
-       u16 zone;
        struct rhash_head node;
        struct rhash_head tuple_node;
        struct rhash_head tuple_nat_node;

        struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
        struct mlx5_eswitch *esw = ct_priv->esw;
 
-       ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+       ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
        mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
        mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
-       xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
 }
 
 static void
                               u8 ct_state,
                               u32 mark,
                               u32 label,
-                              u32 tupleid)
+                              u16 zone)
 {
        struct mlx5_eswitch *esw = ct_priv->esw;
        int err;
                return err;
 
        err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
-                                       TUPLEID_TO_REG, tupleid);
+                                       ZONE_RESTORE_TO_REG, zone + 1);
        if (err)
                return err;
 
 mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                struct mlx5_esw_flow_attr *attr,
                                struct flow_rule *flow_rule,
-                               u32 tupleid,
-                               bool nat)
+                               u16 zone, bool nat)
 {
        struct mlx5e_tc_mod_hdr_acts mod_acts = {};
        struct mlx5_eswitch *esw = ct_priv->esw;
                                             ct_state,
                                             meta->ct_metadata.mark,
                                             meta->ct_metadata.labels[0],
-                                            tupleid);
+                                            zone);
        if (err)
                goto err_mapping;
 
        struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
        struct mlx5_eswitch *esw = ct_priv->esw;
        struct mlx5_flow_spec *spec = NULL;
-       u32 tupleid;
        int err;
 
        zone_rule->nat = nat;
        if (!spec)
                return -ENOMEM;
 
-       /* Get tuple unique id */
-       err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
-                      XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
-       if (err) {
-               netdev_warn(ct_priv->netdev,
-                           "Failed to allocate tuple id, err: %d\n", err);
-               goto err_xa_alloc;
-       }
-       zone_rule->tupleid = tupleid;
-
        err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
-                                             tupleid, nat);
+                                             entry->tuple.zone, nat);
        if (err) {
                ct_dbg("Failed to create ct entry mod hdr");
                goto err_mod_hdr;
 
        mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
-                                   entry->zone & MLX5_CT_ZONE_MASK,
+                                   entry->tuple.zone & MLX5_CT_ZONE_MASK,
                                    MLX5_CT_ZONE_MASK);
 
        zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
        }
 
        kfree(spec);
-       ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+       ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
 
        return 0;
 
 err_rule:
        mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
 err_mod_hdr:
-       xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
-err_xa_alloc:
        kfree(spec);
        return err;
 }
        if (!entry)
                return -ENOMEM;
 
-       entry->zone = ft->zone;
        entry->tuple.zone = ft->zone;
        entry->cookie = flow->cookie;
        entry->restore_cookie = meta_action->ct_metadata.cookie;
        return -EOPNOTSUPP;
 }
 
+static bool
+mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
+                       u16 zone)
+{
+       struct flow_keys flow_keys;
+
+       skb_reset_network_header(skb);
+       skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+
+       tuple->zone = zone;
+
+       if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
+           flow_keys.basic.ip_proto != IPPROTO_UDP)
+               return false;
+
+       tuple->port.src = flow_keys.ports.src;
+       tuple->port.dst = flow_keys.ports.dst;
+       tuple->n_proto = flow_keys.basic.n_proto;
+       tuple->ip_proto = flow_keys.basic.ip_proto;
+
+       switch (flow_keys.basic.n_proto) {
+       case htons(ETH_P_IP):
+               tuple->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+               tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src;
+               tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst;
+               break;
+
+       case htons(ETH_P_IPV6):
+               tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+               tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src;
+               tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst;
+               break;
+       default:
+               goto out;
+       }
+
+       return true;
+
+out:
+       return false;
+}
+
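
For context, the tuple filled in above is used directly as the hash key by the lookups in mlx5e_tc_ct_restore_flow() further down. A minimal sketch of what the tuple hash params could look like, keyed on the whole struct mlx5_ct_tuple (the real tuples_ht_params/tuples_nat_ht_params definitions live elsewhere in this file; this layout is an assumption):

	/* Sketch only: assumed layout of the tuple hash params. The key is
	 * the entire mlx5_ct_tuple, so every field written (or left zeroed)
	 * by mlx5_tc_ct_skb_to_tuple() takes part in the lookup.
	 */
	static const struct rhashtable_params tuples_ht_params = {
		.head_offset = offsetof(struct mlx5_ct_entry, tuple_node),
		.key_offset = offsetof(struct mlx5_ct_entry, tuple),
		.key_len = sizeof(struct mlx5_ct_tuple),
		.automatic_shrinking = true,
	};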
 int
 mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec)
        }
 
        if (mask->ct_zone)
-               mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+               mlx5e_tc_match_to_reg_match(spec, ZONE_RESTORE_TO_REG,
                                            key->ct_zone, MLX5_CT_ZONE_MASK);
        if (ctstate_mask)
                mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
                return -EOPNOTSUPP;
        }
 
+       /* To mark that we need to restore ct state on an skb, we mark the
+        * packet with the zone restore register. To distinguish it from an
+        * uninitialized 0 value for this register, we write zone + 1 on the
+        * packet.
+        *
+        * This restricts us to a max zone of 0xFFFE.
+        */
+       if (act->ct.zone == (u16)~0) {
+               NL_SET_ERR_MSG_MOD(extack, "Unsupported ct zone");
+               return -EOPNOTSUPP;
+       }
+
        attr->ct_attr.zone = act->ct.zone;
        attr->ct_attr.ct_action = act->ct.action;
        attr->ct_attr.nf_ft = act->ct.flow_table;
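
A standalone sketch of the zone + 1 scheme described in the comment above; the helper names are hypothetical and only illustrate the encoding, which in the driver travels in the zone restore register rather than a plain variable:

	#include <assert.h>
	#include <stdint.h>

	static uint16_t zone_restore_encode(uint16_t zone)
	{
		assert(zone != UINT16_MAX);	/* max usable zone is 0xFFFE */
		return zone + 1;		/* 0 stays reserved for "no ct state" */
	}

	static int zone_restore_decode(uint16_t reg, uint16_t *zone)
	{
		if (!reg)
			return 0;		/* uninitialized register: nothing to restore */
		*zone = reg - 1;
		return 1;
	}

This mirrors the zone + 1 written via ZONE_RESTORE_TO_REG above and the zone - 1 passed to mlx5_tc_ct_skb_to_tuple() below.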
  *      | set mark
  *      | set label
  *      | set established
+ *     | set zone_restore
  *      | do nat (if needed)
  *      v
  * +--------------+
        }
 
        idr_init(&ct_priv->fte_ids);
-       xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
        mutex_init(&ct_priv->control_lock);
        rhashtable_init(&ct_priv->zone_ht, &zone_params);
        rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
        rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
        rhashtable_destroy(&ct_priv->zone_ht);
        mutex_destroy(&ct_priv->control_lock);
-       xa_destroy(&ct_priv->tuple_ids);
        idr_destroy(&ct_priv->fte_ids);
        kfree(ct_priv);
 
 
 bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-                        struct sk_buff *skb, u32 tupleid)
+                        struct sk_buff *skb, u16 zone)
 {
        struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
-       struct mlx5_ct_zone_rule *zone_rule;
+       struct mlx5_ct_tuple tuple = {};
        struct mlx5_ct_entry *entry;
 
-       if (!ct_priv || !tupleid)
+       if (!ct_priv || !zone)
                return true;
 
-       zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
-       if (!zone_rule)
+       if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone - 1))
                return false;
 
-       entry = container_of(zone_rule, struct mlx5_ct_entry,
-                            zone_rules[zone_rule->nat]);
-       tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+       entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
+                                      tuples_ht_params);
+       if (!entry)
+               entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+                                              &tuple, tuples_nat_ht_params);
+       if (!entry)
+               return false;
 
+       tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
        return true;
 }
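
On the receive side, a (hypothetical, simplified) caller would mask the zone restore bits out of the register value that hardware delivers with the packet and let the tuple lookup above decide whether any ct state can be restored; everything here except mlx5e_tc_ct_restore_flow() is an assumption:

	static bool mlx5e_handle_ct_restore(struct mlx5_rep_uplink_priv *uplink_priv,
					    struct sk_buff *skb, u32 reg_val)
	{
		u16 zone_restore_id = reg_val & MLX5_CT_ZONE_MASK; /* assumed mask */

		/* false means the tuple missed both hash tables; the caller
		 * should not present stale ct state to the stack.
		 */
		return mlx5e_tc_ct_restore_flow(uplink_priv, skb, zone_restore_id);
	}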