mm/swap: convert add_to_swap_cache() to take a folio
author Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 2 Sep 2022 19:46:08 +0000 (20:46 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:02:46 +0000 (14:02 -0700)
With all callers using folios, we can convert add_to_swap_cache() to take
a folio and use it throughout.
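
For context, the call-site change is mechanical.  A minimal sketch of a
caller before and after this conversion (caller shape only, not a
complete function; the real call sites are in the hunks below):

	/* Before: a caller already holding a folio had to reach
	 * back to the head page. */
	err = add_to_swap_cache(&folio->page, entry, gfp, NULL);

	/* After: the folio is passed directly. */
	err = add_to_swap_cache(folio, entry, gfp, NULL);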

Link: https://lkml.kernel.org/r/20220902194653.1739778-13-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/shmem.c
mm/swap.h
mm/swap_state.c

index 4113f1b9d4a82b318c3fe6a86c5929b7b1eec432..ced76c229b960d46964eb881f961bf667de364de 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1406,7 +1406,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        if (list_empty(&info->swaplist))
                list_add(&info->swaplist, &shmem_swaplist);
 
-       if (add_to_swap_cache(&folio->page, swap,
+       if (add_to_swap_cache(folio, swap,
                        __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
                        NULL) == 0) {
                spin_lock_irq(&info->lock);
index 0ffa5b478051ad7896c7d2114a136c41532ff647..29e38f3d82d033b8d28791b4a8f91e64a4998c90 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -32,7 +32,7 @@ extern struct address_space *swapper_spaces[];
 void show_swap_cache_info(void);
 bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                      gfp_t gfp, void **shadowp);
 void __delete_from_swap_cache(struct folio *folio,
                              swp_entry_t entry, void *shadow);
@@ -122,7 +122,7 @@ static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
        return NULL;
 }
 
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                                        gfp_t gfp_mask, void **shadowp)
 {
        return -1;
index b1e181fc526870561b7dc3f329cd842f4ca06cf6..ecf1accc2fb18269a01cd70f8c0553898c18308f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,21 +85,21 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
  * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
 {
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
-       XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
-       unsigned long i, nr = thp_nr_pages(page);
+       XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
+       unsigned long i, nr = folio_nr_pages(folio);
        void *old;
 
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE(PageSwapCache(page), page);
-       VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+       VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+       VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+       VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
 
-       page_ref_add(page, nr);
-       SetPageSwapCache(page);
+       folio_ref_add(folio, nr);
+       folio_set_swapcache(folio);
 
        do {
                xas_lock_irq(&xas);
@@ -107,19 +107,19 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
-                       VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+                       VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                if (shadowp)
                                        *shadowp = old;
                        }
-                       set_page_private(page + i, entry.val + i);
-                       xas_store(&xas, page);
+                       set_page_private(folio_page(folio, i), entry.val + i);
+                       xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
-               __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
-               __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
+               __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+               __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
 unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));
@@ -127,8 +127,8 @@ unlock:
        if (!xas_error(&xas))
                return 0;
 
-       ClearPageSwapCache(page);
-       page_ref_sub(page, nr);
+       folio_clear_swapcache(folio);
+       folio_ref_sub(folio, nr);
        return xas_error(&xas);
 }
 
@@ -194,7 +194,7 @@ bool add_to_swap(struct folio *folio)
        /*
         * Add it to the swap cache.
         */
-       err = add_to_swap_cache(&folio->page, entry,
+       err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
@@ -484,7 +484,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(&folio->page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+       if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;
 
        mem_cgroup_swapin_uncharge_swap(entry);
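
For quick reference, the page-based helpers this patch replaces and
their folio equivalents, all taken from the hunks above:

	compound_order(page)             -> folio_order(folio)
	thp_nr_pages(page)               -> folio_nr_pages(folio)
	VM_BUG_ON_PAGE(cond, page)       -> VM_BUG_ON_FOLIO(cond, folio)
	PageLocked(page)                 -> folio_test_locked(folio)
	PageSwapBacked(page)             -> folio_test_swapbacked(folio)
	PageSwapCache(page)              -> folio_test_swapcache(folio)
	SetPageSwapCache(page)           -> folio_set_swapcache(folio)
	ClearPageSwapCache(page)         -> folio_clear_swapcache(folio)
	page_ref_add(page, nr)           -> folio_ref_add(folio, nr)
	page_ref_sub(page, nr)           -> folio_ref_sub(folio, nr)
	__mod_node_page_state(page_pgdat(page), item, nr)
	                                 -> __node_stat_mod_folio(folio, item, nr)
	__mod_lruvec_page_state(page, item, nr)
	                                 -> __lruvec_stat_mod_folio(folio, item, nr)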