mm/lru: Convert __pagevec_lru_add_fn to take a folio
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Fri, 14 May 2021 19:08:29 +0000 (15:08 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 18 Oct 2021 11:49:40 +0000 (07:49 -0400)
This saves five calls to compound_head(), totalling 60 bytes of text.
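
Where the saving comes from, modelled as a self-contained userspace
sketch (illustrative only: these are not the kernel's definitions, and
the real compound_head() keeps the head pointer in a tagged field):

    #include <stdbool.h>
    #include <stdio.h>

    struct page {
            unsigned long flags;
            struct page *head;      /* NULL when this page is a head page */
    };

    struct folio {
            struct page page;       /* a folio always wraps a head page */
    };

    #define PG_unevictable (1UL << 0)

    /* Page-based test: an extra load and branch to find the head page. */
    static bool page_unevictable(struct page *page)
    {
            struct page *head = page->head ? page->head : page;
            return head->flags & PG_unevictable;
    }

    /* Folio-based test: the head lookup disappears entirely. */
    static bool folio_unevictable(const struct folio *folio)
    {
            return folio->page.flags & PG_unevictable;
    }

    int main(void)
    {
            struct folio f = { .page = { .flags = PG_unevictable } };
            struct page tail = { .head = &f.page };

            printf("%d %d\n", page_unevictable(&tail), folio_unevictable(&f));
            return 0;
    }

Repeating that head lookup five times in one function is what accounts
for the 60 bytes of text removed here.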

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/trace/events/pagemap.h
mm/swap.c

index 1fd0185d66e80bb674ea477eb850f65ead40ed1f..171524d3526dbb6977e4403d6f8f8e4b78bdce29 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
 #define PAGEMAP_MAPPEDDISK     0x0020u
 #define PAGEMAP_BUFFERS                0x0040u
 
-#define trace_pagemap_flags(page) ( \
-       (PageAnon(page)         ? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
-       (page_mapped(page)      ? PAGEMAP_MAPPED     : 0) | \
-       (PageSwapCache(page)    ? PAGEMAP_SWAPCACHE  : 0) | \
-       (PageSwapBacked(page)   ? PAGEMAP_SWAPBACKED : 0) | \
-       (PageMappedToDisk(page) ? PAGEMAP_MAPPEDDISK : 0) | \
-       (page_has_private(page) ? PAGEMAP_BUFFERS    : 0) \
+#define trace_pagemap_flags(folio) ( \
+       (folio_test_anon(folio)         ? PAGEMAP_ANONYMOUS  : PAGEMAP_FILE) | \
+       (folio_mapped(folio)            ? PAGEMAP_MAPPED     : 0) | \
+       (folio_test_swapcache(folio)    ? PAGEMAP_SWAPCACHE  : 0) | \
+       (folio_test_swapbacked(folio)   ? PAGEMAP_SWAPBACKED : 0) | \
+       (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
+       (folio_test_private(folio)      ? PAGEMAP_BUFFERS    : 0) \
        )
 
 TRACE_EVENT(mm_lru_insertion,
 
-       TP_PROTO(struct page *page),
+       TP_PROTO(struct folio *folio),
 
-       TP_ARGS(page),
+       TP_ARGS(folio),
 
        TP_STRUCT__entry(
-               __field(struct page *,  page    )
+               __field(struct folio *, folio   )
                __field(unsigned long,  pfn     )
                __field(enum lru_list,  lru     )
                __field(unsigned long,  flags   )
        ),
 
        TP_fast_assign(
-               __entry->page   = page;
-               __entry->pfn    = page_to_pfn(page);
-               __entry->lru    = folio_lru_list(page_folio(page));
-               __entry->flags  = trace_pagemap_flags(page);
+               __entry->folio  = folio;
+               __entry->pfn    = folio_pfn(folio);
+               __entry->lru    = folio_lru_list(folio);
+               __entry->flags  = trace_pagemap_flags(folio);
        ),
 
        /* Flag format is based on page-types.c formatting for pagemap */
-       TP_printk("page=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
-                       __entry->page,
+       TP_printk("folio=%p pfn=0x%lx lru=%d flags=%s%s%s%s%s%s",
+                       __entry->folio,
                        __entry->pfn,
                        __entry->lru,
                        __entry->flags & PAGEMAP_MAPPED         ? "M" : " ",
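
The renamed tracepoint is consumed from userspace the same way as
before; a minimal sketch of enabling it (assuming tracefs is mounted at
/sys/kernel/tracing and that this header's event group is "pagemap"):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumed path: tracefs mount point and "pagemap" group. */
            const char *path =
                    "/sys/kernel/tracing/events/pagemap/mm_lru_insertion/enable";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);

            /* Events then appear in /sys/kernel/tracing/trace_pipe with
             * the "folio=... pfn=... lru=... flags=..." format above. */
            return 0;
    }
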
index 5c1674c98f82f6df5d0fddfd3e4287227ecd20a6..858b4a8220ca8401a4edc9e52498b11979d25765 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -992,17 +992,18 @@ void __pagevec_release(struct pagevec *pvec)
 }
 EXPORT_SYMBOL(__pagevec_release);
 
-static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
 {
-       int was_unevictable = TestClearPageUnevictable(page);
-       int nr_pages = thp_nr_pages(page);
+       int was_unevictable = folio_test_clear_unevictable(folio);
+       long nr_pages = folio_nr_pages(folio);
 
-       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
        /*
-        * Page becomes evictable in two ways:
+        * A folio becomes evictable in two ways:
         * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
-        * 2) Before acquiring LRU lock to put the page to correct LRU and then
+        * 2) Before acquiring LRU lock to put the folio on the correct LRU
+        *    and then
         *   a) do PageLRU check with lock [check_move_unevictable_pages]
         *   b) do PageLRU check before lock [clear_page_mlock]
         *
@@ -1011,35 +1012,36 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
         *
         * #0: __pagevec_lru_add_fn             #1: clear_page_mlock
         *
-        * SetPageLRU()                         TestClearPageMlocked()
+        * folio_set_lru()                      folio_test_clear_mlocked()
         * smp_mb() // explicit ordering        // above provides strict
         *                                      // ordering
-        * PageMlocked()                        PageLRU()
+        * folio_test_mlocked()                 folio_test_lru()
         *
         *
-        * if '#1' does not observe setting of PG_lru by '#0' and fails
-        * isolation, the explicit barrier will make sure that page_evictable
-        * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
-        * can be reordered after PageMlocked check and can make '#1' to fail
-        * the isolation of the page whose Mlocked bit is cleared (#0 is also
-        * looking at the same page) and the evictable page will be stranded
-        * in an unevictable LRU.
+        * if '#1' does not observe setting of PG_lru by '#0' and
+        * fails isolation, the explicit barrier will make sure that
+        * folio_evictable check will put the folio on the correct
+        * LRU. Without smp_mb(), folio_set_lru() can be reordered
+        * after folio_test_mlocked() check and can make '#1' fail the
+        * isolation of the folio whose mlocked bit is cleared (#0 is
+        * also looking at the same folio) and the evictable folio will
+        * be stranded on an unevictable LRU.
         */
-       SetPageLRU(page);
+       folio_set_lru(folio);
        smp_mb__after_atomic();
 
-       if (page_evictable(page)) {
+       if (folio_evictable(folio)) {
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
-               ClearPageActive(page);
-               SetPageUnevictable(page);
+               folio_clear_active(folio);
+               folio_set_unevictable(folio);
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }
 
-       add_page_to_lru_list(page, lruvec);
-       trace_mm_lru_insertion(page);
+       lruvec_add_folio(lruvec, folio);
+       trace_mm_lru_insertion(folio);
 }
 
 /*
@@ -1053,11 +1055,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
        unsigned long flags = 0;
 
        for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
-               struct folio *folio = page_folio(page);
+               struct folio *folio = page_folio(pvec->pages[i]);
 
                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-               __pagevec_lru_add_fn(page, lruvec);
+               __pagevec_lru_add_fn(folio, lruvec);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
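
The store-buffering argument in the comment in __pagevec_lru_add_fn()
can be modelled in userspace with C11 atomics. A sketch, not kernel
code: the seq_cst fences stand in for smp_mb__after_atomic() on side #0
and for the full barrier implied by folio_test_clear_mlocked() on side
#1:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define PG_lru     (1u << 0)
    #define PG_mlocked (1u << 1)

    static atomic_uint flags;

    /* #0: __pagevec_lru_add_fn — set PG_lru, then test PG_mlocked. */
    static void *adder(void *ret)
    {
            atomic_fetch_or_explicit(&flags, PG_lru, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            unsigned now = atomic_load_explicit(&flags, memory_order_relaxed);
            *(int *)ret = !(now & PG_mlocked);  /* #0 sees it as evictable */
            return NULL;
    }

    /* #1: clear_page_mlock — clear PG_mlocked, then test PG_lru. */
    static void *clearer(void *ret)
    {
            unsigned old = atomic_fetch_and_explicit(&flags, ~PG_mlocked,
                                                     memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            unsigned now = atomic_load_explicit(&flags, memory_order_relaxed);
            *(int *)ret = (old & PG_mlocked) && (now & PG_lru); /* isolated */
            return NULL;
    }

    int main(void)
    {
            int evictable = 0, isolated = 0;
            pthread_t t0, t1;

            atomic_store(&flags, PG_mlocked);
            pthread_create(&t0, NULL, adder, &evictable);
            pthread_create(&t1, NULL, clearer, &isolated);
            pthread_join(t0, NULL);
            pthread_join(t1, NULL);

            /* The fences forbid the outcome where both sides miss each
             * other's update: at least one of these prints as 1, so the
             * folio is never stranded on the unevictable LRU. */
            printf("evictable=%d isolated=%d\n", evictable, isolated);
            return 0;
    }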