mm/swap: inline folio_set_swap_entry() and folio_swap_entry()
author    David Hildenbrand <david@redhat.com>
          Mon, 21 Aug 2023 16:08:48 +0000 (18:08 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 24 Aug 2023 23:20:28 +0000 (16:20 -0700)
Let's simply work on the folio directly and remove the helpers.
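
The conversion at each call site is mechanical. A minimal sketch of the
before/after shape (the caller below is hypothetical and only illustrates
the pattern; it is not code from this patch):

	/* Hypothetical caller, shown only to illustrate the conversion. */
	static swp_entry_t example_swap_access(struct folio *folio, swp_entry_t entry)
	{
		/* before: swp_entry_t old = folio_swap_entry(folio); */
		swp_entry_t old = folio->swap;

		/* before: folio_set_swap_entry(folio, entry); */
		folio->swap = entry;

		return old;
	}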

Link: https://lkml.kernel.org/r/20230821160849.531668-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Chris Li <chrisl@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
mm/memory.c
mm/shmem.c
mm/swap_state.c
mm/swapfile.c
mm/util.c
mm/vmscan.c
mm/zswap.c

index 352eca0a75bc6ae27c1881f2b3a34006eef39528..493487ed7c388b31bf8a9bef37fea86c365e33a2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -333,25 +333,15 @@ struct swap_info_struct {
                                           */
 };
 
-static inline swp_entry_t folio_swap_entry(struct folio *folio)
-{
-       return folio->swap;
-}
-
 static inline swp_entry_t page_swap_entry(struct page *page)
 {
        struct folio *folio = page_folio(page);
-       swp_entry_t entry = folio_swap_entry(folio);
+       swp_entry_t entry = folio->swap;
 
        entry.val += folio_page_idx(folio, page);
        return entry;
 }
 
-static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
-{
-       folio->swap = entry;
-}
-
 /* linux/mm/workingset.c */
 bool workingset_test_recent(void *shadow, bool file, bool *workingset);
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
index d104a38e8545a7fefd19baddd1aa6096a0556eaa..421fcef3a3e7fa048919b2ee3876a2b83d345448 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3828,7 +3828,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                folio_add_lru(folio);
 
                                /* To provide entry to swap_readpage() */
-                               folio_set_swap_entry(folio, entry);
+                               folio->swap = entry;
                                swap_readpage(page, true, NULL);
                                folio->private = NULL;
                        }
index 99fb60ec2c3d53c5b3e0b51e17f2e6930538ccfa..980289be5f63519172124351ad601708c9696cfe 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1642,7 +1642,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
        int error;
 
        old = *foliop;
-       entry = folio_swap_entry(old);
+       entry = old->swap;
        swap_index = swp_offset(entry);
        swap_mapping = swap_address_space(entry);
 
@@ -1663,7 +1663,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
        __folio_set_locked(new);
        __folio_set_swapbacked(new);
        folio_mark_uptodate(new);
-       folio_set_swap_entry(new, entry);
+       new->swap = entry;
        folio_set_swapcache(new);
 
        /*
@@ -1785,7 +1785,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
        /* We have to do this with folio locked to prevent races */
        folio_lock(folio);
        if (!folio_test_swapcache(folio) ||
-           folio_swap_entry(folio).val != swap.val ||
+           folio->swap.val != swap.val ||
            !shmem_confirm_swap(mapping, index, swap)) {
                error = -EEXIST;
                goto unlock;
index 2f24178100520b080d781526aa660867a4e027eb..b3b14bd0dd6447f47aea2df42e8348f15660c73a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -100,7 +100,7 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 
        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);
-       folio_set_swap_entry(folio, entry);
+       folio->swap = entry;
 
        do {
                xas_lock_irq(&xas);
@@ -156,8 +156,7 @@ void __delete_from_swap_cache(struct folio *folio,
                VM_BUG_ON_PAGE(entry != folio, entry);
                xas_next(&xas);
        }
-       entry.val = 0;
-       folio_set_swap_entry(folio, entry);
+       folio->swap.val = 0;
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
@@ -233,7 +232,7 @@ fail:
  */
 void delete_from_swap_cache(struct folio *folio)
 {
-       swp_entry_t entry = folio_swap_entry(folio);
+       swp_entry_t entry = folio->swap;
        struct address_space *address_space = swap_address_space(entry);
 
        xa_lock_irq(&address_space->i_pages);
index bd9d904671b9476321f08ceb3be4ebc6be030c66..e52f486834ebf79e31c21f00845969050d256d38 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1536,7 +1536,7 @@ unlock_out:
 
 static bool folio_swapped(struct folio *folio)
 {
-       swp_entry_t entry = folio_swap_entry(folio);
+       swp_entry_t entry = folio->swap;
        struct swap_info_struct *si = _swap_info_get(entry);
 
        if (!si)
index cde229b05eb351a806e1f25d072419af63148d39..f31e2ca62cfae8e00bd1878fe1d4252834a2b303 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -764,7 +764,7 @@ struct address_space *folio_mapping(struct folio *folio)
                return NULL;
 
        if (unlikely(folio_test_swapcache(folio)))
-               return swap_address_space(folio_swap_entry(folio));
+               return swap_address_space(folio->swap);
 
        mapping = folio->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
index c7c149cb8d6628d6d89710f89f78192cea9cb108..6f13394b112eaea798ca50ff97fe5efa52747a3e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1423,7 +1423,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
        }
 
        if (folio_test_swapcache(folio)) {
-               swp_entry_t swap = folio_swap_entry(folio);
+               swp_entry_t swap = folio->swap;
 
                if (reclaimed && !mapping_exiting(mapping))
                        shadow = workingset_eviction(folio, target_memcg);
index 7300b98d4a03bdc9d579d4250a1bc48a349db338..412b1409a0d78afbeba2aabc640084e83b39602b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1190,7 +1190,7 @@ static void zswap_fill_page(void *ptr, unsigned long value)
 
 bool zswap_store(struct folio *folio)
 {
-       swp_entry_t swp = folio_swap_entry(folio);
+       swp_entry_t swp = folio->swap;
        int type = swp_type(swp);
        pgoff_t offset = swp_offset(swp);
        struct page *page = &folio->page;
@@ -1370,7 +1370,7 @@ shrink:
 
 bool zswap_load(struct folio *folio)
 {
-       swp_entry_t swp = folio_swap_entry(folio);
+       swp_entry_t swp = folio->swap;
        int type = swp_type(swp);
        pgoff_t offset = swp_offset(swp);
        struct page *page = &folio->page;