mm/swap: Add folio_activate()
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 27 Apr 2021 14:37:50 +0000 (10:37 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 18 Oct 2021 11:49:39 +0000 (07:49 -0400)
This replaces activate_page() and eliminates lots of calls to
compound_head().  Saves net 118 bytes of kernel text.  There are still
some redundant calls to page_folio() here which will be removed when
pagevec_lru_move_fn() is converted to use folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
include/trace/events/pagemap.h
mm/swap.c

index 92ad176210ff5bd9429836b94535e3cd1a170045..1fd0185d66e80bb674ea477eb850f65ead40ed1f 100644 (file)
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -60,23 +60,21 @@ TRACE_EVENT(mm_lru_insertion,
 
 TRACE_EVENT(mm_lru_activate,
 
-       TP_PROTO(struct page *page),
+       TP_PROTO(struct folio *folio),
 
-       TP_ARGS(page),
+       TP_ARGS(folio),
 
        TP_STRUCT__entry(
-               __field(struct page *,  page    )
+               __field(struct folio *, folio   )
                __field(unsigned long,  pfn     )
        ),
 
        TP_fast_assign(
-               __entry->page   = page;
-               __entry->pfn    = page_to_pfn(page);
+               __entry->folio  = folio;
+               __entry->pfn    = folio_pfn(folio);
        ),
 
-       /* Flag format is based on page-types.c formatting for pagemap */
-       TP_printk("page=%p pfn=0x%lx", __entry->page, __entry->pfn)
-
+       TP_printk("folio=%p pfn=0x%lx", __entry->folio, __entry->pfn)
 );
 
 #endif /* _TRACE_PAGEMAP_H */
index 5c688897c013ff2b3adecdf3156a1aa4a923707b..3860d6bc8c8a88b6a24d6b495c5b1046831ab4c2 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -300,15 +300,15 @@ void lru_note_cost_page(struct page *page)
                      page_is_file_lru(page), thp_nr_pages(page));
 }
 
-static void __activate_page(struct page *page, struct lruvec *lruvec)
+static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
 {
-       if (!PageActive(page) && !PageUnevictable(page)) {
-               int nr_pages = thp_nr_pages(page);
+       if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
+               long nr_pages = folio_nr_pages(folio);
 
-               del_page_from_lru_list(page, lruvec);
-               SetPageActive(page);
-               add_page_to_lru_list(page, lruvec);
-               trace_mm_lru_activate(page);
+               lruvec_del_folio(lruvec, folio);
+               folio_set_active(folio);
+               lruvec_add_folio(lruvec, folio);
+               trace_mm_lru_activate(folio);
 
                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
@@ -317,6 +317,11 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
 }
 
 #ifdef CONFIG_SMP
+static void __activate_page(struct page *page, struct lruvec *lruvec)
+{
+       return __folio_activate(page_folio(page), lruvec);
+}
+
 static void activate_page_drain(int cpu)
 {
        struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
@@ -330,16 +335,16 @@ static bool need_activate_page_drain(int cpu)
        return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
-       page = compound_head(page);
-       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+       if (folio_test_lru(folio) && !folio_test_active(folio) &&
+           !folio_test_unevictable(folio)) {
                struct pagevec *pvec;
 
+               folio_get(folio);
                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.activate_page);
-               get_page(page);
-               if (pagevec_add_and_need_flush(pvec, page))
+               if (pagevec_add_and_need_flush(pvec, &folio->page))
                        pagevec_lru_move_fn(pvec, __activate_page);
                local_unlock(&lru_pvecs.lock);
        }
@@ -350,17 +355,15 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct lruvec *lruvec;
 
-       page = &folio->page;
-       if (TestClearPageLRU(page)) {
+       if (folio_test_clear_lru(folio)) {
                lruvec = folio_lruvec_lock_irq(folio);
-               __activate_page(page, lruvec);
+               __folio_activate(folio, lruvec);
                unlock_page_lruvec_irq(lruvec);
-               SetPageLRU(page);
+               folio_set_lru(folio);
        }
 }
 #endif
@@ -425,7 +428,7 @@ void mark_page_accessed(struct page *page)
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
-                       activate_page(page);
+                       folio_activate(page_folio(page));
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);