iomap: Convert to release_folio
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 1 May 2022 03:01:08 +0000 (23:01 -0400)
Committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 10 May 2022 03:12:32 +0000 (23:12 -0400)
Change all the filesystems which used iomap_releasepage to use the
new function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
fs/gfs2/aops.c
fs/iomap/buffered-io.c
fs/iomap/trace.h
fs/xfs/xfs_aops.c
fs/zonefs/super.c
include/linux/iomap.h

index 1016631bcbdc48b958707eafac2ec63425bf6055..3d6c5c5eb4f19d53afa98bd79e102ba3ccabad1b 100644 (file)
@@ -768,7 +768,7 @@ static const struct address_space_operations gfs2_aops = {
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = filemap_dirty_folio,
-       .releasepage = iomap_releasepage,
+       .release_folio = iomap_release_folio,
        .invalidate_folio = iomap_invalidate_folio,
        .bmap = gfs2_bmap,
        .direct_IO = noop_direct_IO,
index 2de087ac87b661c502890032f74616de4ad08654..8532f0e2e2d68e6947451ba877a7905d1bc37588 100644 (file)
@@ -452,25 +452,23 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 }
 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 
-int
-iomap_releasepage(struct page *page, gfp_t gfp_mask)
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-       struct folio *folio = page_folio(page);
-
-       trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
+       trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
                        folio_size(folio));
 
        /*
-        * mm accommodates an old ext3 case where clean pages might not have had
-        * the dirty bit cleared. Thus, it can send actual dirty pages to
-        * ->releasepage() via shrink_active_list(); skip those here.
+        * mm accommodates an old ext3 case where clean folios might
+        * not have had the dirty bit cleared.  Thus, it can send actual
+        * dirty folios to ->release_folio() via shrink_active_list();
+        * skip those here.
         */
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
-               return 0;
+               return false;
        iomap_page_release(folio);
-       return 1;
+       return true;
 }
-EXPORT_SYMBOL_GPL(iomap_releasepage);
+EXPORT_SYMBOL_GPL(iomap_release_folio);
 
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
 {
@@ -1483,7 +1481,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
                 * Skip the page if it's fully outside i_size, e.g. due to a
                 * truncate operation that's in progress. We must redirty the
                 * page so that reclaim stops reclaiming it. Otherwise
-                * iomap_vm_releasepage() is called on it and gets confused.
+                * iomap_release_folio() is called on it and gets confused.
                 *
                 * Note that the end_index is unsigned long.  If the given
                 * offset is greater than 16TB on a 32-bit system then if we
index a6689a563c6e26735f5f5255ab66e479fb0dc509..d48868fc40d78f37273365e458a8dd9ea0791423 100644 (file)
@@ -80,7 +80,7 @@ DEFINE_EVENT(iomap_range_class, name, \
        TP_PROTO(struct inode *inode, loff_t off, u64 len),\
        TP_ARGS(inode, off, len))
 DEFINE_RANGE_EVENT(iomap_writepage);
-DEFINE_RANGE_EVENT(iomap_releasepage);
+DEFINE_RANGE_EVENT(iomap_release_folio);
 DEFINE_RANGE_EVENT(iomap_invalidate_folio);
 DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
 
index a9c4bb500d5385144fc5f8190759c1291cc94562..2acbfc6925dd63990c1f43ec94a8db3d2713cc29 100644 (file)
@@ -568,7 +568,7 @@ const struct address_space_operations xfs_address_space_operations = {
        .readahead              = xfs_vm_readahead,
        .writepages             = xfs_vm_writepages,
        .dirty_folio            = filemap_dirty_folio,
-       .releasepage            = iomap_releasepage,
+       .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = noop_direct_IO,
index c3a38f711b24ba815c2640b3860953f6df93e874..b1a428f860b3937334bd9834d703739c4b28f003 100644 (file)
@@ -197,7 +197,7 @@ static const struct address_space_operations zonefs_file_aops = {
        .writepage              = zonefs_writepage,
        .writepages             = zonefs_writepages,
        .dirty_folio            = filemap_dirty_folio,
-       .releasepage            = iomap_releasepage,
+       .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .migratepage            = iomap_migrate_page,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
index 5b2aa45ddda30a921adaf504f99d678f10746442..0d674695b6d379e4f032042a977037ede139f6a9 100644 (file)
@@ -228,7 +228,7 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
-int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
 #ifdef CONFIG_MIGRATION
 int iomap_migrate_page(struct address_space *mapping, struct page *newpage,