net: sched: cls_api: add skip_sw counter
Author: Asbjørn Sloth Tønnesen <ast@fiberby.net>
Mon, 25 Mar 2024 20:47:34 +0000 (20:47 +0000)
Committer: David S. Miller <davem@davemloft.net>
Fri, 29 Mar 2024 09:46:38 +0000 (09:46 +0000)
Maintain a count of skip_sw filters.

This counter is protected by the cb_lock, and is updated
at the same time as offloadcnt.

Signed-off-by: Asbjørn Sloth Tønnesen <ast@fiberby.net>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sch_generic.h
net/sched/cls_api.c

index cefe0c4bdae34c91868c22731a3b666f8e16e996..120a4ca6ec9b2f070111168b25d31ced50be3782 100644 (file)
@@ -471,6 +471,7 @@ struct tcf_block {
        struct flow_block flow_block;
        struct list_head owner_list;
        bool keep_dst;
+       atomic_t skipswcnt; /* Number of skip_sw filters */
        atomic_t offloadcnt; /* Number of oddloaded filters */
        unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
index ca5676b2668e1f2c42dc332b3c90cfd4f80159b3..397c3d29659cdcc6fcac40d74ea127493d682119 100644 (file)
@@ -3483,6 +3483,8 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
        if (*flags & TCA_CLS_FLAGS_IN_HW)
                return;
        *flags |= TCA_CLS_FLAGS_IN_HW;
+       if (tc_skip_sw(*flags))
+               atomic_inc(&block->skipswcnt);
        atomic_inc(&block->offloadcnt);
 }
 
@@ -3491,6 +3493,8 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
                return;
        *flags &= ~TCA_CLS_FLAGS_IN_HW;
+       if (tc_skip_sw(*flags))
+               atomic_dec(&block->skipswcnt);
        atomic_dec(&block->offloadcnt);
 }