mm: return void from folio_start_writeback() and related functions
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 8 Nov 2023 20:46:05 +0000 (20:46 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 11 Dec 2023 00:51:37 +0000 (16:51 -0800)
Nobody now checks the return value from any of these functions, so
add an assertion at the beginning of the function and return void.

Link: https://lkml.kernel.org/r/20231108204605.745109-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Steve French <sfrench@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/folio-compat.c
mm/page-writeback.c

index a440062e93865f5de6cfcb1bd42f777988a30ea7..735cddc13d20e1fdb1cad4615a78034156d6f28d 100644 (file)
@@ -772,8 +772,8 @@ static __always_inline void SetPageUptodate(struct page *page)
 
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
 
 #define folio_start_writeback(folio)                   \
        __folio_start_writeback(folio, false)
index 10c3247542cbefc18871f109c1c70a2ff2bfb12f..aee3b9a16828539e2168f6b7fbefe278d5746d54 100644 (file)
@@ -46,9 +46,9 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-bool set_page_writeback(struct page *page)
+void set_page_writeback(struct page *page)
 {
-       return folio_start_writeback(page_folio(page));
+       folio_start_writeback(page_folio(page));
 }
 EXPORT_SYMBOL(set_page_writeback);
 
index ee2fd6a6af40728b3990773b464f362222be0621..ca64bd513fa2c125147954ac662217d33f507ac8 100644 (file)
@@ -2982,67 +2982,63 @@ bool __folio_end_writeback(struct folio *folio)
        return ret;
 }
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
 {
        long nr = folio_nr_pages(folio);
        struct address_space *mapping = folio_mapping(folio);
-       bool ret;
        int access_ret;
 
+       VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
        folio_memcg_lock(folio);
        if (mapping && mapping_use_writeback_tags(mapping)) {
                XA_STATE(xas, &mapping->i_pages, folio_index(folio));
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                unsigned long flags;
+               bool on_wblist;
 
                xas_lock_irqsave(&xas, flags);
                xas_load(&xas);
-               ret = folio_test_set_writeback(folio);
-               if (!ret) {
-                       bool on_wblist;
+               folio_test_set_writeback(folio);
 
-                       on_wblist = mapping_tagged(mapping,
-                                                  PAGECACHE_TAG_WRITEBACK);
+               on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
-                       xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-                       if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                               struct bdi_writeback *wb = inode_to_wb(inode);
-
-                               wb_stat_mod(wb, WB_WRITEBACK, nr);
-                               if (!on_wblist)
-                                       wb_inode_writeback_start(wb);
-                       }
+               xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+               if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                       struct bdi_writeback *wb = inode_to_wb(inode);
 
-                       /*
-                        * We can come through here when swapping
-                        * anonymous folios, so we don't necessarily
-                        * have an inode to track for sync.
-                        */
-                       if (mapping->host && !on_wblist)
-                               sb_mark_inode_writeback(mapping->host);
+                       wb_stat_mod(wb, WB_WRITEBACK, nr);
+                       if (!on_wblist)
+                               wb_inode_writeback_start(wb);
                }
+
+               /*
+                * We can come through here when swapping anonymous
+                * folios, so we don't necessarily have an inode to
+                * track for sync.
+                */
+               if (mapping->host && !on_wblist)
+                       sb_mark_inode_writeback(mapping->host);
                if (!folio_test_dirty(folio))
                        xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
                if (!keep_write)
                        xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
                xas_unlock_irqrestore(&xas, flags);
        } else {
-               ret = folio_test_set_writeback(folio);
-       }
-       if (!ret) {
-               lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
-               zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+               folio_test_set_writeback(folio);
        }
+
+       lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+       zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
        folio_memcg_unlock(folio);
+
        access_ret = arch_make_folio_accessible(folio);
        /*
         * If writeback has been triggered on a page that cannot be made
         * accessible, it is too late to recover here.
         */
        VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
-       return ret;
 }
 EXPORT_SYMBOL(__folio_start_writeback);