iomap: make iomap_write_end() return a boolean
authorZhang Yi <yi.zhang@huawei.com>
Wed, 20 Mar 2024 11:05:47 +0000 (19:05 +0800)
committerChristian Brauner <brauner@kernel.org>
Thu, 25 Apr 2024 12:23:54 +0000 (14:23 +0200)
For now, we can make sure iomap_write_end() always returns 0 or the copied
bytes, so instead of returning the number of written bytes, convert it to
return a boolean to indicate whether the copied bytes have been written to
the pagecache.

Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Link: https://lore.kernel.org/r/20240320110548.2200662-9-yi.zhang@huaweicloud.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/buffered-io.c

index 1333a0d96f83245999f582c2e54eb58822f527da..73241fbaff6bc0a4bce6a62f91ff4abda827c09e 100644 (file)
@@ -828,7 +828,7 @@ out_unlock:
        return status;
 }
 
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
                size_t copied, struct folio *folio)
 {
        flush_dcache_folio(folio);
@@ -845,14 +845,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
         * redo the whole thing.
         */
        if (unlikely(copied < len && !folio_test_uptodate(folio)))
-               return 0;
+               return false;
        iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
        iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
        filemap_dirty_folio(inode->i_mapping, folio);
-       return copied;
+       return true;
 }
 
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
                struct folio *folio, loff_t pos, size_t copied)
 {
        const struct iomap *iomap = &iter->iomap;
@@ -867,20 +867,31 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
        kunmap_local(addr);
 
        mark_inode_dirty(iter->inode);
-       return copied;
 }
 
-/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise return false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
                size_t copied, struct folio *folio)
 {
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
 
-       if (srcmap->type == IOMAP_INLINE)
-               return iomap_write_end_inline(iter, folio, pos, copied);
-       if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
-               return block_write_end(NULL, iter->inode->i_mapping, pos, len,
-                                      copied, &folio->page, NULL);
+       if (srcmap->type == IOMAP_INLINE) {
+               iomap_write_end_inline(iter, folio, pos, copied);
+               return true;
+       }
+
+       if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+               size_t bh_written;
+
+               bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+                                       len, copied, &folio->page, NULL);
+               WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+               return bh_written == copied;
+       }
+
        return __iomap_write_end(iter->inode, pos, len, copied, folio);
 }
 
@@ -945,7 +956,8 @@ retry:
                        flush_dcache_folio(folio);
 
                copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
-               written = iomap_write_end(iter, pos, bytes, copied, folio);
+               written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+                         copied : 0;
 
                /*
                 * Update the in-memory inode size after copying the data into
@@ -1323,6 +1335,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                int status;
                size_t offset;
                size_t bytes = min_t(u64, SIZE_MAX, length);
+               bool ret;
 
                status = iomap_write_begin(iter, pos, bytes, &folio);
                if (unlikely(status))
@@ -1334,9 +1347,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                if (bytes > folio_size(folio) - offset)
                        bytes = folio_size(folio) - offset;
 
-               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+               ret = iomap_write_end(iter, pos, bytes, bytes, folio);
                __iomap_put_folio(iter, pos, bytes, folio);
-               if (WARN_ON_ONCE(bytes == 0))
+               if (WARN_ON_ONCE(!ret))
                        return -EIO;
 
                cond_resched();
@@ -1385,6 +1398,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                int status;
                size_t offset;
                size_t bytes = min_t(u64, SIZE_MAX, length);
+               bool ret;
 
                status = iomap_write_begin(iter, pos, bytes, &folio);
                if (status)
@@ -1399,9 +1413,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                folio_zero_range(folio, offset, bytes);
                folio_mark_accessed(folio);
 
-               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+               ret = iomap_write_end(iter, pos, bytes, bytes, folio);
                __iomap_put_folio(iter, pos, bytes, folio);
-               if (WARN_ON_ONCE(bytes == 0))
+               if (WARN_ON_ONCE(!ret))
                        return -EIO;
 
                pos += bytes;