gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async
authorAndreas Gruenbacher <agruenba@redhat.com>
Fri, 15 Mar 2024 15:45:39 +0000 (16:45 +0100)
committerAndreas Gruenbacher <agruenba@redhat.com>
Tue, 9 Apr 2024 16:35:57 +0000 (18:35 +0200)
Function gfs2_glock_queue_put() puts a glock reference by enqueuing
glock work instead of putting the reference directly.  This ensures that
the operation won't sleep, but it is costly and really only necessary
when putting the final glock reference.  Replace it with a new
gfs2_glock_put_async() function that only queues glock work when putting
the last glock reference.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/log.c
fs/gfs2/super.c

index 34540f9d011ca6ca46496900fc74428f9a4f632e..ed90033b9c721a5f6792525e025c831ca4c4d805 100644 (file)
@@ -285,14 +285,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
-{
-       gfs2_glock_queue_work(gl, 0);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -307,6 +299,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
        __gfs2_glock_put(gl);
 }
 
+/**
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on the glock immediately unless it is the
+ * last reference.  Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+       if (lockref_put_or_lock(&gl->gl_lockref))
+               return;
+
+       __gfs2_glock_queue_work(gl, 0);
+       spin_unlock(&gl->gl_lockref.lock);
+}
+
 /**
  * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
@@ -2529,8 +2537,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
        if (gl) {
                if (n == 0)
                        return;
-               if (!lockref_put_not_zero(&gl->gl_lockref))
-                       gfs2_glock_queue_put(gl);
+               gfs2_glock_put_async(gl);
        }
        for (;;) {
                gl = rhashtable_walk_next(&gi->hti);
index 0114f3e0ebe017b898ebb7aaa56e364f3792a7ed..2c697674a86fa5d11e44ff6aecc1258af8b76d8a 100644 (file)
@@ -172,7 +172,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   int create, struct gfs2_glock **glp);
 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
 void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_glock_queue_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
 
 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                        u16 flags, struct gfs2_holder *gh,
index a6dd68b458cec89af6129dea9caa2ca44d8dc900..6ee6013fb8253679f937e7e1faa9c5aecf3e4c5a 100644 (file)
@@ -786,7 +786,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 {
        if (atomic_dec_return(&gl->gl_revokes) == 0) {
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
-               gfs2_glock_queue_put(gl);
+               gfs2_glock_put_async(gl);
        }
 }
 
index 9de789b78bc50077b8dd8ab2e190d7d1d4a8ca31..d481db9510ac4da7806113760fab1481acade9bf 100644 (file)
@@ -1049,7 +1049,7 @@ static int gfs2_drop_inode(struct inode *inode)
 
                gfs2_glock_hold(gl);
                if (!gfs2_queue_try_to_evict(gl))
-                       gfs2_glock_queue_put(gl);
+                       gfs2_glock_put_async(gl);
                return 0;
        }
 
@@ -1255,7 +1255,7 @@ out_qs:
 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 {
        if (current->flags & PF_MEMALLOC)
-               gfs2_glock_queue_put(gl);
+               gfs2_glock_put_async(gl);
        else
                gfs2_glock_put(gl);
 }