isolate_hugetlb() now only returns 0 or -EBUSY, and most callers do not
care about the exact negative value, so convert isolate_hugetlb() to
return a boolean to make the code clearer when checking the hugetlb
isolation state.  Also convert the two callers that do consume the
negative value so that they derive -EBUSY locally (see the sketch after
the tags below).
No functional changes intended.
[akpm@linux-foundation.org: shorten locked section, per SeongJae Park]
Link: https://lkml.kernel.org/r/12a287c5bebc13df304387087bbecc6421510849.1676424378.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
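
For illustration only, not part of the patch: a minimal sketch of the new
calling convention, using a hypothetical caller try_isolate_for_migration()
(the name is made up for this example).  A caller that still needs an errno,
like alloc_and_dissolve_hugetlb_folio() or add_page_for_migration() below,
derives -EBUSY from the boolean itself.

#include <linux/errno.h>
#include <linux/hugetlb.h>	/* isolate_hugetlb() */
#include <linux/list.h>

/*
 * Hypothetical caller, shown only to illustrate the conversion.
 * Previously "ret = isolate_hugetlb(folio, list);" yielded 0 or -EBUSY
 * directly.  Now isolate_hugetlb() returns true on success, so a caller
 * that still needs an errno computes it locally.
 */
static int try_isolate_for_migration(struct folio *folio,
				     struct list_head *list)
{
	bool isolated = isolate_hugetlb(folio, list);

	return isolated ? 0 : -EBUSY;
}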
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-int isolate_hugetlb(struct folio *folio, struct list_head *list);
+bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
return NULL;
}
-static inline int isolate_hugetlb(struct folio *folio, struct list_head *list)
+static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
- return -EBUSY;
+ return false;
}
static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
*/
goto free_new;
} else if (folio_ref_count(old_folio)) {
+ bool isolated;
+
/*
* Someone has grabbed the folio, try to isolate it here.
* Fail with -EBUSY if not possible.
*/
spin_unlock_irq(&hugetlb_lock);
- ret = isolate_hugetlb(old_folio, list);
+ isolated = isolate_hugetlb(old_folio, list);
+ ret = isolated ? 0 : -EBUSY;
spin_lock_irq(&hugetlb_lock);
goto free_new;
} else if (!folio_test_hugetlb_freed(old_folio)) {
if (hstate_is_gigantic(h))
return -ENOMEM;
- if (folio_ref_count(folio) && !isolate_hugetlb(folio, list))
+ if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
ret = 0;
else if (!folio_ref_count(folio))
ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
* These functions are overwritable if your architecture needs its own
* behavior.
*/
-int isolate_hugetlb(struct folio *folio, struct list_head *list)
+bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
- int ret = 0;
+ bool ret = true;
spin_lock_irq(&hugetlb_lock);
if (!folio_test_hugetlb(folio) ||
!folio_test_hugetlb_migratable(folio) ||
!folio_try_get(folio)) {
- ret = -EBUSY;
+ ret = false;
goto unlock;
}
folio_clear_hugetlb_migratable(folio);
bool isolated = false;
if (PageHuge(page)) {
- isolated = !isolate_hugetlb(page_folio(page), pagelist);
+ isolated = isolate_hugetlb(page_folio(page), pagelist);
} else {
bool lru = !__PageMovable(page);
if (flags & (MPOL_MF_MOVE_ALL) ||
(flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
!hugetlb_pmd_shared(pte))) {
- if (isolate_hugetlb(folio, qp->pagelist) &&
+ if (!isolate_hugetlb(folio, qp->pagelist) &&
(flags & MPOL_MF_STRICT))
/*
* Failed to isolate folio but allow migrating pages
struct vm_area_struct *vma;
struct page *page;
int err;
+ bool isolated;
mmap_read_lock(mm);
err = -EFAULT;
if (PageHuge(page)) {
if (PageHead(page)) {
- err = isolate_hugetlb(page_folio(page), pagelist);
- if (!err)
- err = 1;
+ isolated = isolate_hugetlb(page_folio(page), pagelist);
+ err = isolated ? 1 : -EBUSY;
}
} else {
struct page *head;
- bool isolated;
head = compound_head(page);
isolated = isolate_lru_page(head);