mm: free folios in a batch in shrink_folio_list()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 27 Feb 2024 17:42:45 +0000 (17:42 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Tue, 5 Mar 2024 01:01:25 +0000 (17:01 -0800)
Use free_unref_page_batch() to free the folios.  This may increase the
number of IPIs from calling try_to_unmap_flush() more often, but that's
going to be very workload-dependent.  It may even reduce the number of
IPIs as we now batch-free large folios instead of freeing them one at a
time.

Link: https://lkml.kernel.org/r/20240227174254.710559-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c

index dcfbe617e9efc25f6bc07d83fb790cde567562a6..144879d5bb895fc518fff4529ca5ee186012a507 100644 (file)
@@ -1006,14 +1006,15 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
                struct pglist_data *pgdat, struct scan_control *sc,
                struct reclaim_stat *stat, bool ignore_references)
 {
+       struct folio_batch free_folios;
        LIST_HEAD(ret_folios);
-       LIST_HEAD(free_folios);
        LIST_HEAD(demote_folios);
        unsigned int nr_reclaimed = 0;
        unsigned int pgactivate = 0;
        bool do_demote_pass;
        struct swap_iocb *plug = NULL;
 
+       folio_batch_init(&free_folios);
        memset(stat, 0, sizeof(*stat));
        cond_resched();
        do_demote_pass = can_demote(pgdat->node_id, sc);
@@ -1412,14 +1413,11 @@ free_it:
                 */
                nr_reclaimed += nr_pages;
 
-               /*
-                * Is there need to periodically free_folio_list? It would
-                * appear not as the counts should be low
-                */
-               if (unlikely(folio_test_large(folio)))
-                       destroy_large_folio(folio);
-               else
-                       list_add(&folio->lru, &free_folios);
+               if (folio_batch_add(&free_folios, folio) == 0) {
+                       mem_cgroup_uncharge_folios(&free_folios);
+                       try_to_unmap_flush();
+                       free_unref_folios(&free_folios);
+               }
                continue;
 
 activate_locked_split:
@@ -1483,9 +1481,9 @@ keep:
 
        pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
 
-       mem_cgroup_uncharge_list(&free_folios);
+       mem_cgroup_uncharge_folios(&free_folios);
        try_to_unmap_flush();
-       free_unref_page_list(&free_folios);
+       free_unref_folios(&free_folios);
 
        list_splice(&ret_folios, folio_list);
        count_vm_events(PGACTIVATE, pgactivate);