mm: change to return bool for isolate_lru_page()
Author:     Baolin Wang <baolin.wang@linux.alibaba.com>
AuthorDate: Wed, 15 Feb 2023 10:39:35 +0000 (18:39 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Mon, 20 Feb 2023 20:46:17 +0000 (12:46 -0800)
isolate_lru_page() can only return 0 or -EBUSY, and most callers do not
care which negative error is returned; the only exception is
add_page_for_migration().  So convert isolate_lru_page() to return a
boolean value, which makes the code clearer when checking its return
value.

Also convert all callers' checks of the isolation state accordingly.

No functional changes intended.
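
For illustration, a minimal sketch of the conversion at a hypothetical
call site (the real call sites are in the diff below):

	/* before: 0 on success, -EBUSY on failure */
	if (isolate_lru_page(page))
		goto out;

	/* after: true on success, false on failure */
	if (!isolate_lru_page(page))
		goto out;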

Link: https://lkml.kernel.org/r/3074c1ab628d9dbf139b33f248a8bc253a3f95f0.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/folio-compat.c
mm/internal.h
mm/khugepaged.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/migrate_device.c

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 540373cf904e99de77fb7ae5837e0cbd6fc86674..cabcd1de9ecbb206ad7f95e3744c4b2dabf5e92b 100644
@@ -113,17 +113,11 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
 
-int isolate_lru_page(struct page *page)
+bool isolate_lru_page(struct page *page)
 {
-       bool ret;
-
        if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
-               return -EBUSY;
-       ret = folio_isolate_lru((struct folio *)page);
-       if (ret)
-               return 0;
-
-       return -EBUSY;
+               return false;
+       return folio_isolate_lru((struct folio *)page);
 }
 
 void putback_lru_page(struct page *page)
diff --git a/mm/internal.h b/mm/internal.h
index 8645e8496537fe5294174ff3d94ab83afef7be6a..fc01fd092ea584d1954a680499a463e24b97a2de 100644
@@ -187,7 +187,7 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 /*
  * in mm/vmscan.c:
  */
-int isolate_lru_page(struct page *page);
+bool isolate_lru_page(struct page *page);
 bool folio_isolate_lru(struct folio *folio);
 void putback_lru_page(struct page *page);
 void folio_putback_lru(struct folio *folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 15eebab0fbb5b5244b5c6806ac8d2d0d80ec4bc8..987281ead49ef6a53209cee6b37c37eb24c3e769 100644
@@ -636,7 +636,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 * Isolate the page to avoid collapsing an hugepage
                 * currently in use by the VM.
                 */
-               if (isolate_lru_page(page)) {
+               if (!isolate_lru_page(page)) {
                        unlock_page(page);
                        result = SCAN_DEL_PAGE_LRU;
                        goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3e3cdb9bed952f4130931a8dc17fa08473d786d2..25f2465d5a37b55d9588e91f3f70e41785261ecd 100644
@@ -6176,7 +6176,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
                if (target_type == MC_TARGET_PAGE) {
                        page = target.page;
-                       if (!isolate_lru_page(page)) {
+                       if (isolate_lru_page(page)) {
                                if (!mem_cgroup_move_account(page, true,
                                                             mc.from, mc.to)) {
                                        mc.precharge -= HPAGE_PMD_NR;
@@ -6226,7 +6226,7 @@ retry:
                         */
                        if (PageTransCompound(page))
                                goto put;
-                       if (!device && isolate_lru_page(page))
+                       if (!device && !isolate_lru_page(page))
                                goto put;
                        if (!mem_cgroup_move_account(page, false,
                                                mc.from, mc.to)) {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index db85c2d37f70ac95eaae9dce0e492e5bc650b746..e504362fdb23508d003e7045d2a89bebef5932ce 100644
@@ -846,7 +846,7 @@ static const char * const action_page_types[] = {
  */
 static int delete_from_lru_cache(struct page *p)
 {
-       if (!isolate_lru_page(p)) {
+       if (isolate_lru_page(p)) {
                /*
                 * Clear sensible page flags, so that the buddy system won't
                 * complain when the page is unpoison-and-freed.
@@ -2513,7 +2513,7 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
                bool lru = !__PageMovable(page);
 
                if (lru)
-                       isolated = !isolate_lru_page(page);
+                       isolated = isolate_lru_page(page);
                else
                        isolated = !isolate_movable_page(page,
                                                         ISOLATE_UNEVICTABLE);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a1e8c3e9ab080893780661366899240f713adc83..5fc2dcf4e3abe8f36660e895d87b5fd9d92fda4d 100644
@@ -1632,6 +1632,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                struct folio *folio;
+               bool isolated;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -1667,9 +1668,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                 * We can skip free pages. And we can deal with pages on
                 * LRU and non-lru movable pages.
                 */
-               if (PageLRU(page))
-                       ret = isolate_lru_page(page);
-               else
+               if (PageLRU(page)) {
+                       isolated = isolate_lru_page(page);
+                       ret = isolated ? 0 : -EBUSY;
+               } else
                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                if (!ret) { /* Success */
                        list_add_tail(&page->lru, &source);
diff --git a/mm/migrate.c b/mm/migrate.c
index ef68a1aff35c0fb2ca7a4b6d911a0fb193061977..53010a142e7feba7774bee05e57439d17f5e1965 100644
@@ -2132,11 +2132,14 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                }
        } else {
                struct page *head;
+               bool isolated;
 
                head = compound_head(page);
-               err = isolate_lru_page(head);
-               if (err)
+               isolated = isolate_lru_page(head);
+               if (!isolated) {
+                       err = -EBUSY;
                        goto out_putpage;
+               }
 
                err = 1;
                list_add_tail(&head->lru, pagelist);
@@ -2541,7 +2544,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                return 0;
        }
 
-       if (isolate_lru_page(page))
+       if (!isolate_lru_page(page))
                return 0;
 
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 6c3740318a98cede737f3e821363309b33a24dc1..d30c9de60b0d7d37dff51f68baa98428c0b05e9f 100644
@@ -388,7 +388,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
                                allow_drain = false;
                        }
 
-                       if (isolate_lru_page(page)) {
+                       if (!isolate_lru_page(page)) {
                                src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
                                restore++;
                                continue;