mm: remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 11 Dec 2023 16:22:14 +0000 (16:22 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:27 +0000 (11:58 -0800)
All callers have now been converted to folio_add_new_anon_rmap() and
folio_add_lru_vma() so we can remove the wrappers.

Link: https://lkml.kernel.org/r/20231211162214.2146080-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/rmap.h
include/linux/swap.h
mm/folio-compat.c

index af6a32b6f3e7b51732224ea3df2591dece7f0b81..0ae2bb0e77f5de6c8a46922d1ebf1b816bae4e17 100644 (file)
@@ -197,8 +197,6 @@ typedef int __bitwise rmap_t;
 void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
-               unsigned long address);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
                unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
index f6dd6575b90545c3c927efbb033c1134e091b69a..3e1909087f6a9264b0d38f4d3481407718836031 100644 (file)
@@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio);
 void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
-                                               struct vm_area_struct *vma);
-
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
index aee3b9a16828539e2168f6b7fbefe278d5746d54..50412014f16f723b4b393a82036a099567f79747 100644 (file)
@@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-               struct vm_area_struct *vma)
-{
-       folio_add_lru_vma(page_folio(page), vma);
-}
-
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp)
 {
@@ -122,13 +116,3 @@ void putback_lru_page(struct page *page)
 {
        folio_putback_lru(page_folio(page));
 }
-
-#ifdef CONFIG_MMU
-void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
-               unsigned long address)
-{
-       VM_BUG_ON_PAGE(PageTail(page), page);
-
-       return folio_add_new_anon_rmap((struct folio *)page, vma, address);
-}
-#endif