net/sched: act_ct: Always fill offloading tuple iifidx
author     Vlad Buslov <vladbu@nvidia.com>
           Fri, 3 Nov 2023 15:14:10 +0000 (16:14 +0100)
committer  Jakub Kicinski <kuba@kernel.org>
           Thu, 9 Nov 2023 01:47:08 +0000 (17:47 -0800)
The referenced commit doesn't always set the iifidx when offloading the
flow to hardware. Fix the following cases:

- nf_conn_act_ct_ext_fill() is called before the extension is created with
nf_conn_act_ct_ext_add() in tcf_ct_act(). This can cause a rule to be
offloaded with an unspecified iifidx when the connection is offloaded
after only a single original-direction packet has been processed by the
tc data path. Always fill the new nf_conn_act_ct_ext instance right after
creating it in nf_conn_act_ct_ext_add().

- Offloading of unidirectional UDP NEW connections is now supported, but
the ct flow iifidx field is not updated when the connection is promoted
to bidirectional, which can leave the reply-direction iifidx zero when
refreshing the connection. Fill in the extension and update the flow
iifidx before calling flow_offload_refresh(). The per-direction
bookkeeping both fixes rely on is sketched after this list.
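
For illustration only, a minimal userspace C sketch of that
per-direction bookkeeping (not kernel code; every name except the
ifindex[] idea borrowed from struct nf_conn_act_ct_ext is made up):

#include <stdio.h>

enum flow_dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

struct ct_ext {
	int ifindex[DIR_MAX];	/* one iifidx per flow direction */
};

/* analogue of nf_conn_act_ct_ext_fill(): record the packet's ingress
 * ifindex for the direction the packet travels in
 */
static void ext_fill(struct ct_ext *ext, enum flow_dir dir, int rx_ifindex)
{
	ext->ifindex[dir] = rx_ifindex;
}

int main(void)
{
	struct ct_ext ext = { { 0 } };

	/* extension created on the first original-direction packet:
	 * fill it immediately (first fix)
	 */
	ext_fill(&ext, DIR_ORIGINAL, 5);

	/* connection promoted to bidirectional: without the second fix
	 * the reply direction would stay 0; update it before refresh
	 */
	ext_fill(&ext, DIR_REPLY, 7);

	printf("orig iifidx=%d reply iifidx=%d\n",
	       ext.ifindex[DIR_ORIGINAL], ext.ifindex[DIR_REPLY]);
	return 0;
}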

Fixes: 9795ded7f924 ("net/sched: act_ct: Fill offloading tuple iifidx")
Fixes: 6a9bad0069cf ("net/sched: act_ct: offload UDP NEW connections")
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20231103151410.764271-1-vladbu@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/netfilter/nf_conntrack_act_ct.h
net/openvswitch/conntrack.c
net/sched/act_ct.c

diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
index 078d3c52c03f982cb5d6c4469b3ac0a73656329f..e5f2f0b73a9a0dde838c59f6f84dff0cbdb97e79 100644
--- a/include/net/netfilter/nf_conntrack_act_ct.h
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf_conn *ct)
 #endif
 }
 
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+       struct nf_conn_act_ct_ext *act_ct_ext;
+
+       act_ct_ext = nf_conn_act_ct_ext_find(ct);
+       if (dev_net(skb->dev) == &init_net && act_ct_ext)
+               act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+                                          struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo)
 {
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
        struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
                return act_ct;
 
        act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+       nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
        return act_ct;
 #else
        return NULL;
 #endif
 }
 
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
-       struct nf_conn_act_ct_ext *act_ct_ext;
-
-       act_ct_ext = nf_conn_act_ct_ext_find(ct);
-       if (dev_net(skb->dev) == &init_net && act_ct_ext)
-               act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
 #endif /* _NF_CONNTRACK_ACT_CT_H */
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 0b9a785dea45951dc625d5c5a05c5610106ea915..3019a4406ca4f72be806ff922e377ea7609c3934 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                if (err)
                        return err;
 
-               nf_conn_act_ct_ext_add(ct);
+               nf_conn_act_ct_ext_add(skb, ct, ctinfo);
        } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
                   labels_nonzero(&info->labels.mask)) {
                err = ovs_ct_set_labels(ct, key, &info->labels.value,
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 9583645e86c280d33e6681d1b770540a89497443..0db0ecf1d11038a49e487e36b2eb33a028ae8727 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
        entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
 }
 
+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
+{
+       struct nf_conn_act_ct_ext *act_ct_ext;
+
+       act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
+       if (act_ct_ext) {
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
+       }
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
                                  struct nf_conn *ct,
                                  bool tcp, bool bidirectional)
@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
        else
                ctinfo = IP_CT_ESTABLISHED_REPLY;
 
+       nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+       tcf_ct_flow_ct_ext_ifidx_update(flow);
        flow_offload_refresh(nf_ft, flow, force_refresh);
        if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
                /* Process this flow in SW to allow promoting to ASSURED */
@@ -1034,7 +1047,7 @@ do_nat:
                tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
 
                if (!nf_ct_is_confirmed(ct))
-                       nf_conn_act_ct_ext_add(ct);
+                       nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 
                /* This will take care of sending queued events
                 * even if the connection is already confirmed.