iomap: move all remaining per-folio logic into iomap_writepage_map
author Christoph Hellwig <hch@lst.de>
Thu, 7 Dec 2023 07:27:02 +0000 (08:27 +0100)
committer Christian Brauner <brauner@kernel.org>
Thu, 1 Feb 2024 13:20:11 +0000 (14:20 +0100)
Move the tracepoint and the iomap check from iomap_do_writepage into
iomap_writepage_map.  This keeps all logic in one place, and leaves
iomap_do_writepage just as the wrapper for the callback conventions of
write_cache_pages, which will go away when that is converted to an
iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20231207072710.176093-7-hch@lst.de
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/iomap/buffered-io.c

index 75278e1b05f822f1b06606cc32cdb157bd625aef..e3175d3cc0362a223ec0c3e088791f5686b0de33 100644 (file)
@@ -1832,19 +1832,25 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
  * At the end of a writeback pass, there will be a cached ioend remaining on the
  * writepage context that the caller will need to submit.
  */
-static int
-iomap_writepage_map(struct iomap_writepage_ctx *wpc,
-               struct writeback_control *wbc, struct inode *inode,
-               struct folio *folio, u64 end_pos)
+static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+               struct writeback_control *wbc, struct folio *folio)
 {
        struct iomap_folio_state *ifs = folio->private;
+       struct inode *inode = folio->mapping->host;
        struct iomap_ioend *ioend, *next;
        unsigned len = i_blocksize(inode);
        unsigned nblocks = i_blocks_per_folio(inode, folio);
        u64 pos = folio_pos(folio);
+       u64 end_pos = pos + folio_size(folio);
        int error = 0, count = 0, i;
        LIST_HEAD(submit_list);
 
+       trace_iomap_writepage(inode, pos, folio_size(folio));
+
+       if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
+               folio_unlock(folio);
+               return 0;
+       }
        WARN_ON_ONCE(end_pos <= pos);
 
        if (!ifs && nblocks > 1) {
@@ -1944,28 +1950,10 @@ done:
        return error;
 }
 
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page, we need to allocate space and flush it.
- * For unwritten space on the page, we need to start the conversion to
- * regular allocated space.
- */
 static int iomap_do_writepage(struct folio *folio,
                struct writeback_control *wbc, void *data)
 {
-       struct iomap_writepage_ctx *wpc = data;
-       struct inode *inode = folio->mapping->host;
-       u64 end_pos = folio_pos(folio) + folio_size(folio);
-
-       trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
-
-       if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
-               folio_unlock(folio);
-               return 0;
-       }
-
-       return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
+       return iomap_writepage_map(data, wbc, folio);
 }
 
 int