rcu/kvfree: Remove useless monitor_todo flag
author Joel Fernandes (Google) <joel@joelfernandes.org>
Thu, 2 Jun 2022 08:06:43 +0000 (10:06 +0200)
committer Paul E. McKenney <paulmck@kernel.org>
Tue, 19 Jul 2022 18:40:00 +0000 (11:40 -0700)
The monitor_todo flag is not needed: the underlying work_struct
already tracks whether the monitor work is pending, and
schedule_delayed_work() does nothing when the work is already queued.
Rely on that helper instead of maintaining a separate flag.
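
As a minimal sketch of the idiom this change relies on (not part of
this patch; drain_fn, drain_work and queue_drain are hypothetical
names):

	/*
	 * Hypothetical example, not from kernel/rcu/tree.c.
	 * schedule_delayed_work() atomically tests and sets the
	 * WORK_STRUCT_PENDING bit and returns false when the work
	 * is already queued, so calling it unconditionally can
	 * never double-queue the work.
	 */
	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	static void drain_fn(struct work_struct *work)
	{
	        /* ... drain whatever has accumulated ... */
	}

	static DECLARE_DELAYED_WORK(drain_work, drain_fn);

	static void queue_drain(void)
	{
	        /* Safe to call concurrently from several contexts:
	         * only the first call while the work is idle
	         * actually queues it. */
	        schedule_delayed_work(&drain_work, HZ / 50);
	}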

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
kernel/rcu/tree.c

index 9d9a2a657823272a1c7d93f1da1fe75d3cedb54f..6f4656aed962aa27d4d4f40a57814b00ea7084ce 100644 (file)
@@ -3216,7 +3216,6 @@ struct kfree_rcu_cpu_work {
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
- * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
  * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
  * @bkvcache:
@@ -3241,7 +3240,6 @@ struct kfree_rcu_cpu {
        struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
        raw_spinlock_t lock;
        struct delayed_work monitor_work;
-       bool monitor_todo;
        bool initialized;
        int count;
 
@@ -3421,6 +3419,18 @@ static void kfree_rcu_work(struct work_struct *work)
        }
 }
 
+static bool
+need_offload_krc(struct kfree_rcu_cpu *krcp)
+{
+       int i;
+
+       for (i = 0; i < FREE_N_CHANNELS; i++)
+               if (krcp->bkvhead[i])
+                       return true;
+
+       return !!krcp->head;
+}
+
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
  */
@@ -3477,9 +3487,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
        // of the channels that is still busy we should rearm the
        // work to repeat an attempt. Because previous batches are
        // still in progress.
-       if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
-               krcp->monitor_todo = false;
-       else
+       if (need_offload_krc(krcp))
                schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 
        raw_spin_unlock_irqrestore(&krcp->lock, flags);
@@ -3667,11 +3675,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
        WRITE_ONCE(krcp->count, krcp->count + 1);
 
        // Set timer to drain after KFREE_DRAIN_JIFFIES.
-       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-           !krcp->monitor_todo) {
-               krcp->monitor_todo = true;
+       if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
                schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
-       }
 
 unlock_return:
        krc_this_cpu_unlock(krcp, flags);
@@ -3746,14 +3751,8 @@ void __init kfree_rcu_scheduler_running(void)
                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
                raw_spin_lock_irqsave(&krcp->lock, flags);
-               if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
-                               krcp->monitor_todo) {
-                       raw_spin_unlock_irqrestore(&krcp->lock, flags);
-                       continue;
-               }
-               krcp->monitor_todo = true;
-               schedule_delayed_work_on(cpu, &krcp->monitor_work,
-                                        KFREE_DRAIN_JIFFIES);
+               if (need_offload_krc(krcp))
+                       schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
                raw_spin_unlock_irqrestore(&krcp->lock, flags);
        }
 }
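
With monitor_todo gone, the remaining logic is uniform:
kvfree_call_rcu() schedules the monitor unconditionally (it has just
queued an object, so offloading is needed by construction), while
kfree_rcu_monitor() and kfree_rcu_scheduler_running() re-arm it only
when need_offload_krc() reports a busy channel, in both cases under
krcp->lock, which still protects the bkvhead[] and head lists the
helper reads. Duplicate-queue suppression now lives entirely in the
workqueue core's atomic pending bit rather than in a hand-rolled flag.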