net: add napi_busy_loop_rcu()
author	Stefan Roesch <shr@devkernel.io>
Tue, 6 Feb 2024 16:30:04 +0000 (09:30 -0700)
committer	Jakub Kicinski <kuba@kernel.org>
Fri, 9 Feb 2024 18:01:09 +0000 (10:01 -0800)
Add the napi_busy_loop_rcu() function. It assumes that the caller
already holds the RCU read lock, so it does not take the RCU read lock
itself. Also add a NAPI_F_END_ON_RESCHED flag, which tells
__napi_busy_loop() to abort when a reschedule is needed rather than
drop the RCU read lock and reschedule.

Signed-off-by: Stefan Roesch <shr@devkernel.io>
Link: https://lore.kernel.org/r/20230608163839.2891748-3-shr@devkernel.io
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/busy_poll.h
net/core/dev.c

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 4dabeb6c76d31da1e3725a091a0a2636fcc9667c..9b09acac538eed8dbaa2576bf2af926ecd98eb44 100644
@@ -48,6 +48,10 @@ void napi_busy_loop(unsigned int napi_id,
                    bool (*loop_end)(void *, unsigned long),
                    void *loop_end_arg, bool prefer_busy_poll, u16 budget);
 
+void napi_busy_loop_rcu(unsigned int napi_id,
+                       bool (*loop_end)(void *, unsigned long),
+                       void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index 1eaed657f2c2401d29c79c9057b85df1ec45c64b..ffa394f3e7968eddb48ba2fbe41ca2e301553be8 100644
@@ -6179,6 +6179,7 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
 
 enum {
        NAPI_F_PREFER_BUSY_POLL = 1,
+       NAPI_F_END_ON_RESCHED   = 2,
 };
 
 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
@@ -6285,6 +6286,8 @@ count:
                        break;
 
                if (unlikely(need_resched())) {
+                       if (flags & NAPI_F_END_ON_RESCHED)
+                               break;
                        if (napi_poll)
                                busy_poll_stop(napi, have_poll_lock, flags, budget);
                        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
@@ -6304,6 +6307,18 @@ count:
                preempt_enable();
 }
 
+void napi_busy_loop_rcu(unsigned int napi_id,
+                       bool (*loop_end)(void *, unsigned long),
+                       void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+{
+       unsigned flags = NAPI_F_END_ON_RESCHED;
+
+       if (prefer_busy_poll)
+               flags |= NAPI_F_PREFER_BUSY_POLL;
+
+       __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
+}
+
 void napi_busy_loop(unsigned int napi_id,
                    bool (*loop_end)(void *, unsigned long),
                    void *loop_end_arg, bool prefer_busy_poll, u16 budget)