mm: vmscan: reclaim writepage is IO cost
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 3 Jun 2020 23:03:09 +0000 (16:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 4 Jun 2020 03:09:49 +0000 (20:09 -0700)
The VM tries to balance reclaim pressure between anon and file so as to
reduce the amount of IO incurred due to the memory shortage.  It already
counts refaults and swapins, but in addition it should also count
writepage calls during reclaim.

For swap, this is obvious: it's IO that wouldn't have occurred if the
anonymous memory hadn't been under memory pressure.  From a relative
balancing point of view this makes sense as well: even if anon is cold and
reclaimable, a cache that isn't thrashing may have equally cold pages that
don't require IO to reclaim.

For file writeback, it's trickier: some of the reclaim writepage IO would
have likely occurred anyway due to dirty expiration.  But not all of it -
premature writeback reduces batching and generates additional writes.
Since the flushers are already woken up by the time the VM starts writing
cache pages one by one, let's assume that we're likely causing writes that
wouldn't have happened without memory pressure.  In addition, the per-page
cost of IO would have probably been much cheaper if written in larger
batches from the flusher thread rather than the single-page-writes from
kswapd.

For our purposes - getting the trend right to accelerate convergence on a
stable state that doesn't require paging at all - this is sufficiently
accurate.  If we later wanted to optimize for sustained thrashing, we can
still refine the measurements.

Count all writepage calls from kswapd as IO cost toward the LRU that the
page belongs to.
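
For context, here is a simplified, self-contained sketch of how the
accumulated anon_cost/file_cost could be turned into relative scan
pressure, in the spirit of get_scan_count() in mm/vmscan.c.  The helper
name and the exact arithmetic are illustrative, not the kernel code
verbatim:

  /*
   * Illustrative sketch: the LRU with the higher recent IO cost
   * appears in the denominator of its own fraction, so it gets
   * scanned proportionally less.
   */
  void scan_fractions(unsigned long anon_cost, unsigned long file_cost,
                      int swappiness, /* 0..200 scale used internally */
                      unsigned long *anon_frac, unsigned long *file_frac)
  {
          unsigned long total_cost = anon_cost + file_cost;
          unsigned long ap, fp;

          ap = swappiness * (total_cost + 1);
          ap /= anon_cost + 1;

          fp = (200 - swappiness) * (total_cost + 1);
          fp /= file_cost + 1;

          *anon_frac = ap;
          *file_frac = fp;
          /* callers scale each list's scan target by frac / (ap + fp) */
  }

The list whose recent reclaim IO was more expensive ends up with the
smaller fraction, which is the feedback loop that the pageout
accounting added here plugs into.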

Why do this dynamically?  Don't we know in advance that anon pages require
IO to reclaim, and so could build in a static bias?

First, scanning is not the same as reclaiming.  If all the anon pages are
referenced, we may not swap for a while just because we're scanning the
anon list.  During this time, however, it's important that we age
anonymous memory and the page cache at the same rate so that their
hot-cold gradients are comparable.  Everything else being equal, we still
want to reclaim the coldest memory overall.

Second, we keep copies in swap unless the page changes.  If there is
swap-backed data that's mostly read (tmpfs file) and has been swapped out
before, we can reclaim it without incurring additional IO.
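
As a hedged illustration of that point (not the actual reclaim code),
the decision of whether dropping a page costs IO at all could be
summarized as:

  /*
   * Illustrative only: an anon page whose swap copy is still valid
   * (in the swap cache and not redirtied) can be reclaimed without
   * any writepage, so no IO cost should be charged for it.
   */
  static inline bool reclaim_needs_pageout(struct page *page)
  {
          if (PageAnon(page))
                  return !(PageSwapCache(page) && !PageDirty(page));
          /* file pages only need writeback when dirty */
          return PageDirty(page);
  }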

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/20200520232525.798933-14-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
include/linux/vmstat.h
mm/swap.c
mm/swap_state.c
mm/vmscan.c
mm/workingset.c

index 0b71bf75fb679f276ed6be3624665f2ef26fbbab..4c5974bb9ba94191f295bd0c2f7fc3daa87003b3 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -334,7 +334,9 @@ extern unsigned long nr_free_pagecache_pages(void);
 
 
 /* linux/mm/swap.c */
-extern void lru_note_cost(struct page *);
+extern void lru_note_cost(struct lruvec *lruvec, bool file,
+                         unsigned int nr_pages);
+extern void lru_note_cost_page(struct page *);
 extern void lru_cache_add(struct page *);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
                         struct lruvec *lruvec, struct list_head *head);
index 10cc932e209afca08ec0a74c372f51eb492645fa..3d12c34cd42a5732de7e10219b19ed1580ffcfb9 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,6 +26,7 @@ struct reclaim_stat {
        unsigned nr_congested;
        unsigned nr_writeback;
        unsigned nr_immediate;
+       unsigned nr_pageout;
        unsigned nr_activate[2];
        unsigned nr_ref_keep;
        unsigned nr_unmap_fail;
index 4dff2123f695f1432d20184c68d454b65928662d..343675d629ae930691723aab1f74cbfda2a318af 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -278,18 +278,16 @@ void rotate_reclaimable_page(struct page *page)
        }
 }
 
-void lru_note_cost(struct page *page)
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 {
-       struct lruvec *lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-
        do {
                unsigned long lrusize;
 
                /* Record cost event */
-               if (page_is_file_lru(page))
-                       lruvec->file_cost++;
+               if (file)
+                       lruvec->file_cost += nr_pages;
                else
-                       lruvec->anon_cost++;
+                       lruvec->anon_cost += nr_pages;
 
                /*
                 * Decay previous events
@@ -311,6 +309,12 @@ void lru_note_cost(struct page *page)
        } while ((lruvec = parent_lruvec(lruvec)));
 }
 
+void lru_note_cost_page(struct page *page)
+{
+       lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
+                     page_is_file_lru(page), hpage_nr_pages(page));
+}
+
 static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
 {
index 1cd0b345ff7e1e394c05ef4acc59ca8e81bc4210..9d20b00627af433d057ef2b12052c654f5f7c36e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -442,7 +442,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 
        /* XXX: Move to lru_cache_add() when it supports new vs putback */
        spin_lock_irq(&page_pgdat(page)->lru_lock);
-       lru_note_cost(page);
+       lru_note_cost_page(page);
        spin_unlock_irq(&page_pgdat(page)->lru_lock);
 
        /* Caller will initiate read into locked page */
index d08640f0235c8857205446356391c8cdf5ee6dac..14ffe9ccf7ef081c1bb5ecc683bdf1ada15f9228 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1359,6 +1359,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
+                               stat->nr_pageout += hpage_nr_pages(page);
+
                                if (PageWriteback(page))
                                        goto keep;
                                if (PageDirty(page))
@@ -1964,6 +1966,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        move_pages_to_lru(lruvec, &page_list);
 
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
+       lru_note_cost(lruvec, file, stat.nr_pageout);
        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
        if (!cgroup_reclaim(sc))
                __count_vm_events(item, nr_reclaimed);
index a6a2a740ed0ba2bea4d0e5bcb221a1a717aba3c0..d481ea452eeb09cfd62efc2016ff16ae21ccfe65 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -367,7 +367,7 @@ void workingset_refault(struct page *page, void *shadow)
                SetPageWorkingset(page);
                /* XXX: Move to lru_cache_add() when it supports new vs putback */
                spin_lock_irq(&page_pgdat(page)->lru_lock);
-               lru_note_cost(page);
+               lru_note_cost_page(page);
                spin_unlock_irq(&page_pgdat(page)->lru_lock);
                inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
        }