memory-failure: convert delete_from_lru_cache() to take a folio
author		Matthew Wilcox (Oracle) <willy@infradead.org>
		Fri, 17 Nov 2023 16:14:44 +0000 (16:14 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Mon, 11 Dec 2023 00:51:41 +0000 (16:51 -0800)
All three callers now have a folio; pass it in instead of the page.
Saves five calls to compound_head().
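
For the record, the five saved calls correspond to the five page-based
operations in the old body (isolate_lru_page(), ClearPageActive(),
ClearPageUnevictable(), page_folio() and put_page()), each of which
resolves the head page internally; the folio variants start from the
folio the caller already has.  A simplified illustration of the
difference (the real page flag accessors are generated by macros in
include/linux/page-flags.h, so this is a sketch, not the literal
expansion):

	/* ClearPageActive(p) looks up the head page on every call: */
	clear_bit(PG_active, &compound_head(p)->flags);

	/* folio_clear_active(folio) is already at the head page: */
	clear_bit(PG_active, folio_flags(folio, 0));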

Link: https://lkml.kernel.org/r/20231117161447.2461643-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d2764fd3e448419991286287ffb1b51b37a2219a..e73f2047ffcbedc02fc0ed7252ed4cee901158aa 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -902,26 +902,26 @@ static const char * const action_page_types[] = {
  * The page count will stop it from being freed by unpoison.
  * Stress tests should be aware of this memory leak problem.
  */
-static int delete_from_lru_cache(struct page *p)
+static int delete_from_lru_cache(struct folio *folio)
 {
-       if (isolate_lru_page(p)) {
+       if (folio_isolate_lru(folio)) {
                /*
                 * Clear sensible page flags, so that the buddy system won't
-                * complain when the page is unpoison-and-freed.
+                * complain when the folio is unpoison-and-freed.
                 */
-               ClearPageActive(p);
-               ClearPageUnevictable(p);
+               folio_clear_active(folio);
+               folio_clear_unevictable(folio);
 
                /*
                 * Poisoned page might never drop its ref count to 0 so we have
                 * to uncharge it manually from its memcg.
                 */
-               mem_cgroup_uncharge(page_folio(p));
+               mem_cgroup_uncharge(folio);
 
                /*
-                * drop the page count elevated by isolate_lru_page()
+                * drop the refcount elevated by folio_isolate_lru()
                 */
-               put_page(p);
+               folio_put(folio);
                return 0;
        }
        return -EIO;
@@ -1019,7 +1019,7 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
        struct address_space *mapping;
        bool extra_pins;
 
-       delete_from_lru_cache(p);
+       delete_from_lru_cache(folio);
 
        /*
         * For anonymous folios the only reference left
@@ -1146,7 +1146,7 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
        /* Trigger EIO in shmem: */
        folio_clear_uptodate(folio);
 
-       ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+       ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
        folio_unlock(folio);
 
        if (ret == MF_DELAYED)
@@ -1165,7 +1165,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
 
        delete_from_swap_cache(folio);
 
-       ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+       ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
        folio_unlock(folio);
 
        if (has_extra_refcount(ps, p, false))
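
As a usage note, the converted call sites take the shape visible in the
context above: each handler derives the folio from the struct page it is
passed (done by earlier patches in this series) and hands it down.  A
sketch of me_swapcache_clean() reassembled from the hunk; the lines not
shown in the diff (the folio derivation and the return path) are assumed
to follow the surrounding code:

	static int me_swapcache_clean(struct page_state *ps, struct page *p)
	{
		struct folio *folio = page_folio(p);
		int ret;

		delete_from_swap_cache(folio);

		ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
		folio_unlock(folio);

		if (has_extra_refcount(ps, p, false))
			ret = MF_FAILED;

		return ret;
	}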