 {
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+       struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
        int ret;
 
            (gl->gl_state == LM_ST_EXCLUSIVE) ||
            (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                clear_bit(GLF_BLOCKING, &gl->gl_flags);
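+       /* Nothing to sync or invalidate: skip ahead with the spinlock held. */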
+       if (!glops->go_inval && !glops->go_sync)
+               goto skip_inval;
+
        spin_unlock(&gl->gl_lockref.lock);
        if (glops->go_sync) {
                ret = glops->go_sync(gl);
                                fs_err(sdp, "Error %d syncing glock \n", ret);
                                gfs2_dump_glock(NULL, gl, true);
                        }
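+                       /* skip_inval now runs with gl_lockref.lock held. */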
+                       spin_lock(&gl->gl_lockref.lock);
                        goto skip_inval;
                }
        }
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
                clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
        }
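+       /* Re-acquire gl_lockref.lock for the skip_inval code below. */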
+       spin_lock(&gl->gl_lockref.lock);
 
 skip_inval:
-       gfs2_glock_hold(gl);
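+       /* gl_lockref.lock is held, so bump the reference count directly. */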
+       gl->gl_lockref.count++;
        /*
         * Check for an error encountered since we called go_sync and go_inval.
         * If so, we can't withdraw from the glock code because the withdraw
                         */
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-                       gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
-                       goto out;
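+                       /* Queue work and return with gl_lockref.lock held. */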
+                       __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+                       return;
                } else {
                        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                }
        }
 
-       if (sdp->sd_lockstruct.ls_ops->lm_lock) {
-               struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+       if (ls->ls_ops->lm_lock) {
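+               /* lm_lock is still called without gl_lockref.lock held. */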
+               spin_unlock(&gl->gl_lockref.lock);
+               ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+               spin_lock(&gl->gl_lockref.lock);
 
-               /* lock_dlm */
-               ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-                       spin_lock(&gl->gl_lockref.lock);
-                       finish_xmote(gl, target);
-                       __gfs2_glock_queue_work(gl, 0);
-                       spin_unlock(&gl->gl_lockref.lock);
+                       /*
+                        * The lockspace has been released and the lock has
+                        * been unlocked implicitly.
+                        */
                } else if (ret) {
                        fs_err(sdp, "lm_lock ret %d\n", ret);
-                       GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
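+                       /* Report the failure through finish_xmote() below. */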
+                       target = gl->gl_state | LM_OUT_ERROR;
+               } else {
+                       /* The operation will be completed asynchronously. */
+                       return;
                }
-       } else { /* lock_nolock */
-               spin_lock(&gl->gl_lockref.lock);
-               finish_xmote(gl, target);
-               __gfs2_glock_queue_work(gl, 0);
-               spin_unlock(&gl->gl_lockref.lock);
        }
-out:
-       spin_lock(&gl->gl_lockref.lock);
+
+       /* Complete the operation now. */
+       finish_xmote(gl, target);
+       __gfs2_glock_queue_work(gl, 0);
 }
 
 /**