mm/page_alloc: add helper for checking if check_pages_enabled
author    Mike Rapoport (IBM) <rppt@kernel.org>
          Tue, 21 Mar 2023 17:05:01 +0000 (19:05 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 6 Apr 2023 02:42:52 +0000 (19:42 -0700)
Instead of duplicating the long static_branch_unlikely(&check_pages_enabled)
expression, wrap it in the helper function is_check_pages_enabled().
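
For context: check_pages_enabled is a static key, so the test compiles down
to a patchable branch and the helper stays essentially free on the fast path.
A minimal sketch of the same idiom follows; the my_-prefixed names are
illustrative only, while DEFINE_STATIC_KEY_MAYBE, static_branch_unlikely and
static_branch_enable are the kernel's jump-label API:

	/* key defaults to on only when CONFIG_DEBUG_VM is set */
	static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, my_checks_enabled);

	static inline bool my_checks_are_enabled(void)
	{
		/* a no-op branch on the fast path until the key is flipped */
		return static_branch_unlikely(&my_checks_enabled);
	}

	/* a boot-time knob could later flip it on with: */
	/* static_branch_enable(&my_checks_enabled); */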

Link: https://lkml.kernel.org/r/20230321170513.2401534-3-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/page_alloc.c

index 4b09711b6f0feaf7862e867a75d24db436d305eb..33925488040fe098d1b6d5af970b68f1e441771f 100644
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+       return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
                                = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1450,7 +1455,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                for (i = 1; i < (1 << order); i++) {
                        if (compound)
                                bad += free_tail_pages_check(page, page + i);
-                       if (static_branch_unlikely(&check_pages_enabled)) {
+                       if (is_check_pages_enabled()) {
                                if (unlikely(free_page_is_bad(page + i))) {
                                        bad++;
                                        continue;
@@ -1463,7 +1468,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                page->mapping = NULL;
        if (memcg_kmem_online() && PageMemcgKmem(page))
                __memcg_kmem_uncharge_page(page, order);
-       if (static_branch_unlikely(&check_pages_enabled)) {
+       if (is_check_pages_enabled()) {
                if (free_page_is_bad(page))
                        bad++;
                if (bad)
@@ -2373,7 +2378,7 @@ static int check_new_page(struct page *page)
 
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-       if (static_branch_unlikely(&check_pages_enabled)) {
+       if (is_check_pages_enabled()) {
                for (int i = 0; i < (1 << order); i++) {
                        struct page *p = page + i;