truncate: Add invalidate_complete_folio2()
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Wed, 28 Jul 2021 19:52:34 +0000 (15:52 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
		Sat, 8 Jan 2022 05:28:41 +0000 (00:28 -0500)
Convert invalidate_complete_page2() to invalidate_complete_folio2().
Use filemap_free_folio() to free the page instead of calling ->freepage
manually.
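
For reference, filemap_free_folio(), which this patch un-statics in
mm/filemap.c and declares in mm/internal.h, does roughly the following.
This is a simplified sketch for review context rather than the exact
implementation; compound-page reference accounting is omitted:

	/* Simplified sketch; see mm/filemap.c for the real function. */
	void filemap_free_folio(struct address_space *mapping, struct folio *folio)
	{
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;
		if (freepage)
			freepage(&folio->page);

		folio_put(folio);	/* drop the page cache reference */
	}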

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
mm/filemap.c
mm/internal.h
mm/truncate.c

diff --git a/mm/filemap.c b/mm/filemap.c
index 4c39e09a2f518b63720cd19a6f5d2c990778679d..72ea824db6f075c510a55cc33bedc4504c2fbd43 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -229,8 +229,7 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
        page_cache_delete(mapping, folio, shadow);
 }
 
-static void filemap_free_folio(struct address_space *mapping,
-                               struct folio *folio)
+void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 {
        void (*freepage)(struct page *);
 
diff --git a/mm/internal.h b/mm/internal.h
index f9967b0be8bf7c369a64bd5a3b94efe6c188755e..e5f3ff3ae24e90d7c20e6b5c338b8151d02e3c75 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -92,6 +92,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
                pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 
 /**
diff --git a/mm/truncate.c b/mm/truncate.c
index ef6980b240e2f6133e1cdd97b095f7850b0bc460..5370094641d6fa26636cf4c7c765c6af2439cd6a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -571,31 +571,29 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
  * shrink_page_list() has a temp ref on them, or because they're transiently
  * sitting in the lru_cache_add() pagevecs.
  */
-static int
-invalidate_complete_page2(struct address_space *mapping, struct page *page)
+static int invalidate_complete_folio2(struct address_space *mapping,
+                                       struct folio *folio)
 {
-       if (page->mapping != mapping)
+       if (folio->mapping != mapping)
                return 0;
 
-       if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
+       if (folio_has_private(folio) &&
+           !filemap_release_folio(folio, GFP_KERNEL))
                return 0;
 
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
-       if (PageDirty(page))
+       if (folio_test_dirty(folio))
                goto failed;
 
-       BUG_ON(page_has_private(page));
-       __delete_from_page_cache(page, NULL);
+       BUG_ON(folio_has_private(folio));
+       __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);
 
-       if (mapping->a_ops->freepage)
-               mapping->a_ops->freepage(page);
-
-       put_page(page); /* pagecache ref */
+       filemap_free_folio(mapping, folio);
        return 1;
 failed:
        xa_unlock_irq(&mapping->i_pages);
@@ -679,8 +677,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
                        ret2 = do_launder_page(mapping, &folio->page);
                        if (ret2 == 0) {
-                               if (!invalidate_complete_page2(mapping,
-                                                               &folio->page))
+                               if (!invalidate_complete_folio2(mapping, folio))
                                        ret2 = -EBUSY;
                        }
                        if (ret2 < 0)