gfs2: Remove and replace gfs2_glock_queue_work
Author:     Andreas Gruenbacher <agruenba@redhat.com>
AuthorDate: Fri, 12 Apr 2024 19:58:15 +0000 (21:58 +0200)
Commit:     Andreas Gruenbacher <agruenba@redhat.com>
CommitDate: Wed, 24 Apr 2024 17:48:20 +0000 (19:48 +0200)
There are no callers of gfs2_glock_queue_work() left, so remove that
helper.  With that gone, rename __gfs2_glock_queue_work() back to
gfs2_glock_queue_work() to get rid of the unnecessary underscore
clutter.  In addition, gfs2_glock_put_async() now asserts with
GLOCK_BUG_ON() that it holds the last glock reference, which is then
passed on to the queued work.
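
For reference, a minimal sketch of the before/after convention (the
caller shown below is illustrative rather than a specific call site):

    /* Before: wrapper that took the lockref spinlock itself. */
    static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
            spin_lock(&gl->gl_lockref.lock);
            __gfs2_glock_queue_work(gl, delay);
            spin_unlock(&gl->gl_lockref.lock);
    }

    /* After: every caller already holds gl->gl_lockref.lock and
     * typically passes a glock reference on to the queued work. */
    spin_lock(&gl->gl_lockref.lock);
    gl->gl_lockref.count++;
    gfs2_glock_queue_work(gl, 0);
    spin_unlock(&gl->gl_lockref.lock);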

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
fs/gfs2/glock.c

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index fa374617d2f191983ad5ee06eddca14c2b705fa5..9f11fc1e79eb67c46a55a1fa759508a29a7a1227 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -274,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * Enqueue the glock on the work queue.  Passes one glock reference on to the
  * work queue.
  */
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
        if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
                /*
                 * We are holding the lockref spinlock, and the work was still
@@ -287,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
        }
 }
 
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-       spin_lock(&gl->gl_lockref.lock);
-       __gfs2_glock_queue_work(gl, delay);
-       spin_unlock(&gl->gl_lockref.lock);
-}
-
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -337,7 +331,8 @@ void gfs2_glock_put_async(struct gfs2_glock *gl)
        if (lockref_put_or_lock(&gl->gl_lockref))
                return;
 
-       __gfs2_glock_queue_work(gl, 0);
+       GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+       gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -831,7 +826,7 @@ skip_inval:
                         */
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-                       __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+                       gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
                        return;
                } else {
                        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -861,7 +856,7 @@ skip_inval:
 
        /* Complete the operation now. */
        finish_xmote(gl, target);
-       __gfs2_glock_queue_work(gl, 0);
+       gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -909,7 +904,7 @@ out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        gl->gl_lockref.count++;
-       __gfs2_glock_queue_work(gl, 0);
+       gfs2_glock_queue_work(gl, 0);
        return;
 
 out_unlock:
@@ -1141,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
                drop_refs--;
                if (gl->gl_name.ln_type != LM_TYPE_INODE)
                        delay = 0;
-               __gfs2_glock_queue_work(gl, delay);
+               gfs2_glock_queue_work(gl, delay);
        }
 
        /*
         * Drop the remaining glock references manually here. (Mind that
-        * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+        * gfs2_glock_queue_work depends on the lockref spinlock being held
         * here as well.)
         */
        gl->gl_lockref.count -= drop_refs;
@@ -1651,7 +1646,7 @@ unlock:
                     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                gl->gl_lockref.count++;
-               __gfs2_glock_queue_work(gl, 0);
+               gfs2_glock_queue_work(gl, 0);
        }
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);
@@ -1717,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
                    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
                    gl->gl_name.ln_type == LM_TYPE_INODE)
                        delay = gl->gl_hold_time;
-               __gfs2_glock_queue_work(gl, delay);
+               gfs2_glock_queue_work(gl, delay);
        }
 }
 
@@ -1941,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
                        delay = gl->gl_hold_time;
        }
        handle_callback(gl, state, delay, true);
-       __gfs2_glock_queue_work(gl, delay);
+       gfs2_glock_queue_work(gl, delay);
        spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2001,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
        gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       __gfs2_glock_queue_work(gl, 0);
+       gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2070,7 +2065,7 @@ add_back_to_lru:
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-               __gfs2_glock_queue_work(gl, 0);
+               gfs2_glock_queue_work(gl, 0);
                spin_unlock(&gl->gl_lockref.lock);
                cond_resched_lock(&lru_lock);
        }
@@ -2194,7 +2189,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
        spin_lock(&gl->gl_lockref.lock);
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       __gfs2_glock_queue_work(gl, 0);
+       gfs2_glock_queue_work(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
 }
 
@@ -2213,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
                gl->gl_lockref.count++;
                if (gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-               __gfs2_glock_queue_work(gl, 0);
+               gfs2_glock_queue_work(gl, 0);
        }
        spin_unlock(&gl->gl_lockref.lock);
 }