mm/memory-failure: convert free_raw_hwp_pages() to folios
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Thu, 12 Jan 2023 20:46:04 +0000 (14:46 -0600)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:02 +0000 (22:33 -0800)
Change free_raw_hwp_pages() to folio_free_raw_hwp() and convert two
uses of hugetlb-specific page macros to their folio equivalents.
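
The caller side follows the usual folio conversion recipe:
unpoison_memory() now looks up the folio once with page_folio(), which
returns the owning folio for head and tail pages alike, and passes it
to folio_free_raw_hwp().  A condensed sketch of the pattern, mirroring
the hunks below (illustrative, not additional code in this patch):

	struct page *p = pfn_to_page(pfn);
	struct folio *folio = page_folio(p);	/* head- or tail-page safe */

	if (PageHuge(p))
		count = folio_free_raw_hwp(folio, false);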

Link: https://lkml.kernel.org/r/20230112204608.80136-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory-failure.c

index d4aaed2756af55a8e97cf4e4abfc3a02ab33bbc6..a2835907caf8e74d63c4acac0da42842ea86d74e 100644
@@ -1766,23 +1766,23 @@ static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
        return ret;
 }
 
-static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
+static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
 {
        /*
-        * HPageVmemmapOptimized hugepages can't be freed because struct
+        * hugetlb_vmemmap_optimized hugepages can't be freed because struct
         * pages for tail pages are required but they don't exist.
         */
-       if (move_flag && HPageVmemmapOptimized(hpage))
+       if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
                return 0;
 
        /*
-        * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
+        * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
         * definition.
         */
-       if (HPageRawHwpUnreliable(hpage))
+       if (folio_test_hugetlb_raw_hwp_unreliable(folio))
                return 0;
 
-       return __free_raw_hwp_pages(hpage, move_flag);
+       return __free_raw_hwp_pages(&folio->page, move_flag);
 }
 
 void folio_clear_hugetlb_hwpoison(struct folio *folio)
@@ -1790,7 +1790,7 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio)
        if (folio_test_hugetlb_raw_hwp_unreliable(folio))
                return;
        folio_clear_hwpoison(folio);
-       free_raw_hwp_pages(&folio->page, true);
+       folio_free_raw_hwp(folio, true);
 }
 
 /*
@@ -1929,7 +1929,7 @@ static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *
        return 0;
 }
 
-static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
+static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
 {
        return 0;
 }
@@ -2336,6 +2336,7 @@ core_initcall(memory_failure_init);
 int unpoison_memory(unsigned long pfn)
 {
        struct page *page;
+       struct folio *folio;
        struct page *p;
        int ret = -EBUSY;
        unsigned long count = 1;
@@ -2348,6 +2349,7 @@ int unpoison_memory(unsigned long pfn)
 
        p = pfn_to_page(pfn);
        page = compound_head(p);
+       folio = page_folio(p);
 
        mutex_lock(&mf_mutex);
 
@@ -2389,7 +2391,7 @@ int unpoison_memory(unsigned long pfn)
        if (!ret) {
                if (PageHuge(p)) {
                        huge = true;
-                       count = free_raw_hwp_pages(page, false);
+                       count = folio_free_raw_hwp(folio, false);
                        if (count == 0) {
                                ret = -EBUSY;
                                goto unlock_mutex;
@@ -2405,7 +2407,7 @@ int unpoison_memory(unsigned long pfn)
        } else {
                if (PageHuge(p)) {
                        huge = true;
-                       count = free_raw_hwp_pages(page, false);
+                       count = folio_free_raw_hwp(folio, false);
                        if (count == 0) {
                                ret = -EBUSY;
                                put_page(page);
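
For reference, the page-macro to folio-test mapping applied in the
first hunk; both sides are generated by the HPAGEFLAG() definitions in
include/linux/hugetlb.h:

	HPageVmemmapOptimized(hpage)  ->  folio_test_hugetlb_vmemmap_optimized(folio)
	HPageRawHwpUnreliable(hpage)  ->  folio_test_hugetlb_raw_hwp_unreliable(folio)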