truncate,shmem: Add truncate_inode_folio()
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Thu, 2 Dec 2021 21:01:55 +0000 (16:01 -0500)
committerMatthew Wilcox (Oracle) <willy@infradead.org>
Sat, 8 Jan 2022 05:28:41 +0000 (00:28 -0500)
Convert all callers of truncate_inode_page() to call
truncate_inode_folio() instead, and move the declaration to mm/internal.h.
Move the assertion that the caller is not passing in a tail page to
generic_error_remove_page().  We can't entirely remove the struct page
from the callers yet because the page pointer in the pvec might be a
shadow/dax/swap entry rather than an actual page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
include/linux/mm.h
mm/internal.h
mm/shmem.c
mm/truncate.c

index c9cdb26802fb044ef02c81e409c7ddac18cd45f4..d8b7d7ed14ddf7494976ff2ff56e62e5c9c2fc1b 100644 (file)
@@ -1859,7 +1859,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
index 1ca93c6cb18cd3c667dbb2b2e751084603a8d1bc..f9967b0be8bf7c369a64bd5a3b94efe6c188755e 100644 (file)
@@ -92,6 +92,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
                pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 
 /**
  * folio_evictable - Test whether a folio is evictable.
index 40da9075374b5133fc505253f9605dd5c8dc812e..dbef008fb6e57e7e826a33107ebc7c720ab28a22 100644 (file)
@@ -950,7 +950,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        index += folio_nr_pages(folio) - 1;
 
                        if (!unfalloc || !folio_test_uptodate(folio))
-                               truncate_inode_page(mapping, &folio->page);
+                               truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
                }
                pagevec_remove_exceptionals(&pvec);
@@ -1027,7 +1027,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                }
                                VM_BUG_ON_PAGE(PageWriteback(page), page);
                                if (shmem_punch_compound(page, start, end))
-                                       truncate_inode_page(mapping, page);
+                                       truncate_inode_folio(mapping,
+                                                            page_folio(page));
                                else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                                        /* Wipe the page and don't get stuck */
                                        clear_highpage(page);
index c98feea75a107a121b578bd9844f3bce39c028ae..0000424fc56b6ef65c73edc5866dc8434360c335 100644 (file)
@@ -218,12 +218,9 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
        return ret;
 }
 
-int truncate_inode_page(struct address_space *mapping, struct page *page)
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
-       VM_BUG_ON_PAGE(PageTail(page), page);
-
-       if (page->mapping != mapping)
+       if (folio->mapping != mapping)
                return -EIO;
 
        truncate_cleanup_folio(folio);
@@ -236,6 +233,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
  */
 int generic_error_remove_page(struct address_space *mapping, struct page *page)
 {
+       VM_BUG_ON_PAGE(PageTail(page), page);
+
        if (!mapping)
                return -EINVAL;
        /*
@@ -244,7 +243,7 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
         */
        if (!S_ISREG(mapping->host->i_mode))
                return -EIO;
-       return truncate_inode_page(mapping, page);
+       return truncate_inode_folio(mapping, page_folio(page));
 }
 EXPORT_SYMBOL(generic_error_remove_page);
 
@@ -395,18 +394,20 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
+                       struct folio *folio;
 
                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
 
                        if (xa_is_value(page))
                                continue;
+                       folio = page_folio(page);
 
-                       lock_page(page);
-                       WARN_ON(page_to_index(page) != index);
-                       wait_on_page_writeback(page);
-                       truncate_inode_page(mapping, page);
-                       unlock_page(page);
+                       folio_lock(folio);
+                       VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+                       folio_wait_writeback(folio);
+                       truncate_inode_folio(mapping, folio);
+                       folio_unlock(folio);
                }
                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                pagevec_release(&pvec);