fuse: add fuse_dio_lock/unlock helper functions
authorBernd Schubert <bschubert@ddn.com>
Sat, 23 Dec 2023 23:05:53 +0000 (00:05 +0100)
committerMiklos Szeredi <mszeredi@redhat.com>
Fri, 23 Feb 2024 16:36:32 +0000 (17:36 +0100)
So far this is just a helper to move the complex locking logic out of
fuse_direct_write_iter.  It is especially needed by the next patch in the
series, which adds the fuse inode cache IO mode and introduces even more
locking complexity.

Signed-off-by: Bernd Schubert <bschubert@ddn.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
fs/fuse/file.c

index 66b3c79ed385d4490980d25a793ff8ce5c047f08..0115428c33a2572fb8aea56a3b397d9e246ef24c 100644 (file)
@@ -1340,6 +1340,37 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
        return false;
 }
 
+static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
+                         bool *exclusive)
+{
+       struct inode *inode = file_inode(iocb->ki_filp);
+
+       *exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
+       if (*exclusive) {
+               inode_lock(inode);
+       } else {
+               inode_lock_shared(inode);
+               /*
+                * Previous check was without inode lock and might have raced,
+                * check again.
+                */
+               if (fuse_io_past_eof(iocb, from)) {
+                       inode_unlock_shared(inode);
+                       inode_lock(inode);
+                       *exclusive = true;
+               }
+       }
+}
+
+static void fuse_dio_unlock(struct inode *inode, bool exclusive)
+{
+       if (exclusive) {
+               inode_unlock(inode);
+       } else {
+               inode_unlock_shared(inode);
+       }
+}
+
 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
@@ -1604,30 +1635,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(iocb->ki_filp);
        struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
        ssize_t res;
-       bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from);
-
-       /*
-        * Take exclusive lock if
-        * - Parallel direct writes are disabled - a user space decision
-        * - Parallel direct writes are enabled and i_size is being extended.
-        * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
-        *   This might not be needed at all, but needs further investigation.
-        */
-       if (exclusive_lock)
-               inode_lock(inode);
-       else {
-               inode_lock_shared(inode);
-
-               /*
-                * Previous check was without any lock and might have raced.
-                */
-               if (fuse_io_past_eof(iocb, from)) {
-                       inode_unlock_shared(inode);
-                       inode_lock(inode);
-                       exclusive_lock = true;
-               }
-       }
+       bool exclusive;
 
+       fuse_dio_lock(iocb, from, &exclusive);
        res = generic_write_checks(iocb, from);
        if (res > 0) {
                if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
@@ -1638,10 +1648,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
                        fuse_write_update_attr(inode, iocb->ki_pos, res);
                }
        }
-       if (exclusive_lock)
-               inode_unlock(inode);
-       else
-               inode_unlock_shared(inode);
+       fuse_dio_unlock(inode, exclusive);
 
        return res;
 }