mm: use free_unref_folios() in put_pages_list()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 27 Feb 2024 17:42:41 +0000 (17:42 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
Tue, 5 Mar 2024 01:01:24 +0000 (17:01 -0800)
Break up the list of folios into batches here so that the folios are more
likely to be cache hot when doing the rest of the processing.
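
The "> 0" test in the loop below relies on folio_batch_add() reporting
how much room is left in the batch.  Roughly, from
include/linux/pagevec.h (paraphrased here for reference; see that
header for the authoritative version):

	/*
	 * Stash a folio in the batch and return the number of slots
	 * still free; a return of 0 means the batch is now full and
	 * should be flushed (here, with free_unref_folios()).
	 */
	static inline unsigned folio_batch_add(struct folio_batch *fbatch,
			struct folio *folio)
	{
		fbatch->folios[fbatch->nr++] = folio;
		return folio_batch_space(fbatch);
	}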

Link: https://lkml.kernel.org/r/20240227174254.710559-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swap.c

index ee8b131bf32cf1859f80df5c0f6d725e8a0aea9d..ad3f2e9448a4cde106f48de31dc002ef8e87db6a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -138,22 +138,25 @@ EXPORT_SYMBOL(__folio_put);
  */
 void put_pages_list(struct list_head *pages)
 {
+       struct folio_batch fbatch;
        struct folio *folio, *next;
 
+       folio_batch_init(&fbatch);
        list_for_each_entry_safe(folio, next, pages, lru) {
-               if (!folio_put_testzero(folio)) {
-                       list_del(&folio->lru);
+               if (!folio_put_testzero(folio))
                        continue;
-               }
                if (folio_test_large(folio)) {
-                       list_del(&folio->lru);
                        __folio_put_large(folio);
                        continue;
                }
                /* LRU flag must be clear because it's passed using the lru */
+               if (folio_batch_add(&fbatch, folio) > 0)
+                       continue;
+               free_unref_folios(&fbatch);
        }
 
-       free_unref_page_list(pages);
+       if (fbatch.nr)
+               free_unref_folios(&fbatch);
        INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
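
For context, a sketch of how a caller might use put_pages_list(); the
function and the allocation loop below are illustrative only, not part
of this patch:

	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/mm.h>

	/*
	 * Collect a few pages on a private list, then drop our
	 * reference on all of them with one call instead of a
	 * put_page() per page.
	 */
	static void release_collected_pages(void)
	{
		LIST_HEAD(pages);
		int i;

		for (i = 0; i < 16; i++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page)
				break;
			list_add(&page->lru, &pages);
		}

		/*
		 * Frees each page whose refcount drops to zero and
		 * leaves @pages reinitialised to an empty list.
		 */
		put_pages_list(&pages);
	}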