From: Anshuman Khandual
Date: Fri, 9 Feb 2024 05:42:21 +0000 (+0530)
Subject: mm/hugetlb: move page order check inside hugetlb_cma_reserve()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=ce70cfb145ad98739c8ae23d070a37863a4c52bb;p=linux.git

mm/hugetlb: move page order check inside hugetlb_cma_reserve()

All platforms could benefit from a page order check against MAX_PAGE_ORDER
before allocating a CMA area for gigantic hugetlb pages.  Let's move this
check from the individual platforms into generic hugetlb code.

Link: https://lkml.kernel.org/r/20240209054221.1403364-1-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Reviewed-by: Jane Chu
Reviewed-by: David Hildenbrand
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Michael Ellerman
Cc: Nicholas Piggin
Signed-off-by: Andrew Morton
---

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 8116ac599f801..6720ec8d50e76 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab62..16557d008eef5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 	 */
 	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3d651fc1dd668..c53a41d07cd3a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7720,6 +7720,13 @@ void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)
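
For readers following along, below is a minimal userspace sketch of the
pattern the patch applies: the gigantic-page sanity check lives once in the
generic reservation helper instead of being duplicated at every per-arch
call site. This is an illustration only, not kernel code;
hugetlb_cma_reserve_sketch() and the PAGE_SHIFT/MAX_PAGE_ORDER values here
are hypothetical stand-ins for the real kernel definitions.

    #include <stdio.h>

    #define PAGE_SHIFT     12   /* illustrative: 4K base pages */
    #define MAX_PAGE_ORDER 10   /* illustrative buddy-allocator limit */

    /* Generic helper: the sanity check now lives here, once. */
    static void hugetlb_cma_reserve_sketch(int order)
    {
            /*
             * CMA reservation only makes sense for gigantic pages, i.e.
             * orders the page allocator cannot satisfy. Warn if that
             * assumption ever breaks.
             */
            if (order <= MAX_PAGE_ORDER)
                    fprintf(stderr, "warn: order %d fits the page allocator\n",
                            order);

            printf("reserving CMA for order-%d huge pages\n", order);
    }

    int main(void)
    {
            /*
             * A per-arch caller now only computes an order and calls the
             * helper, mirroring arm64_hugetlb_cma_reserve() after the patch.
             */
            int order = 30 - PAGE_SHIFT;    /* e.g. 1GiB pages: 2^30 bytes */

            hugetlb_cma_reserve_sketch(order);
            return 0;
    }

Centralizing the check this way means a future caller of
hugetlb_cma_reserve() cannot forget it, which is the point of the move.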